Merge tag 'drm-vc4-next-2015-12-21' of http://github.com/anholt/linux into drm-next

I've decided to just send this fixes-for-next pull request now, even
though we don't yet have a reviewed patch for the CONFIG_PM_SLEEP build
failure. If you like my patch for that, I'd be happy to see it applied
directly.

This pull request brings in a few small fixes from Dan Carpenter for
the 3D support added in this -next cycle.

* tag 'drm-vc4-next-2015-12-21' of http://github.com/anholt/linux:
  drm/vc4: fix an error code
  drm/vc4: allocate enough memory in vc4_save_hang_state()
  drm/vc4: copy_to_user() returns the number of bytes remaining
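For reference, copy_to_user() returns the number of bytes it failed to
copy rather than a negative errno, so callers must turn a non-zero
return into -EFAULT themselves. A minimal sketch of that idiom
(hypothetical handler, not the actual vc4 code):

    #include <linux/uaccess.h>

    /* Sketch: report -EFAULT when copy_to_user() leaves bytes uncopied,
     * instead of returning the (positive) remaining byte count.
     */
    static int example_copy_out(void __user *uptr, const void *src, size_t len)
    {
            if (copy_to_user(uptr, src, len))
                    return -EFAULT;
            return 0;
    }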
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index c66d641..6c6e81a 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -986,10 +986,7 @@
 !Idrivers/gpu/drm/drm_atomic.c
     </sect2>
     <sect2>
-      <title>Frame Buffer Creation</title>
-      <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
-				     struct drm_file *file_priv,
-				     struct drm_mode_fb_cmd2 *mode_cmd);</synopsis>
+      <title>Frame Buffer Abstraction</title>
       <para>
         Frame buffers are abstract memory objects that provide a source of
         pixels to scanout to a CRTC. Applications explicitly request the
@@ -1008,73 +1005,6 @@
 	and so expects TTM handles in the create ioctl and not GEM handles.
       </para>
       <para>
-        Drivers must first validate the requested frame buffer parameters passed
-        through the mode_cmd argument. In particular this is where invalid
-        sizes, pixel formats or pitches can be caught.
-      </para>
-      <para>
-        If the parameters are deemed valid, drivers then create, initialize and
-        return an instance of struct <structname>drm_framebuffer</structname>.
-        If desired the instance can be embedded in a larger driver-specific
-	structure. Drivers must fill its <structfield>width</structfield>,
-	<structfield>height</structfield>, <structfield>pitches</structfield>,
-        <structfield>offsets</structfield>, <structfield>depth</structfield>,
-        <structfield>bits_per_pixel</structfield> and
-        <structfield>pixel_format</structfield> fields from the values passed
-        through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
-        should call the <function>drm_helper_mode_fill_fb_struct</function>
-        helper function to do so.
-      </para>
-
-      <para>
-	The initialization of the new framebuffer instance is finalized with a
-	call to <function>drm_framebuffer_init</function> which takes a pointer
-	to DRM frame buffer operations (struct
-	<structname>drm_framebuffer_funcs</structname>). Note that this function
-	publishes the framebuffer and so from this point on it can be accessed
-	concurrently from other threads. Hence it must be the last step in the
-	driver's framebuffer initialization sequence. Frame buffer operations
-	are
-        <itemizedlist>
-          <listitem>
-            <synopsis>int (*create_handle)(struct drm_framebuffer *fb,
-		     struct drm_file *file_priv, unsigned int *handle);</synopsis>
-            <para>
-              Create a handle to the frame buffer underlying memory object. If
-              the frame buffer uses a multi-plane format, the handle will
-              reference the memory object associated with the first plane.
-            </para>
-            <para>
-              Drivers call <function>drm_gem_handle_create</function> to create
-              the handle.
-            </para>
-          </listitem>
-          <listitem>
-            <synopsis>void (*destroy)(struct drm_framebuffer *framebuffer);</synopsis>
-            <para>
-              Destroy the frame buffer object and frees all associated
-              resources. Drivers must call
-              <function>drm_framebuffer_cleanup</function> to free resources
-              allocated by the DRM core for the frame buffer object, and must
-              make sure to unreference all memory objects associated with the
-              frame buffer. Handles created by the
-              <methodname>create_handle</methodname> operation are released by
-              the DRM core.
-            </para>
-          </listitem>
-          <listitem>
-            <synopsis>int (*dirty)(struct drm_framebuffer *framebuffer,
-	     struct drm_file *file_priv, unsigned flags, unsigned color,
-	     struct drm_clip_rect *clips, unsigned num_clips);</synopsis>
-            <para>
-              This optional operation notifies the driver that a region of the
-              frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB
-              ioctl call.
-            </para>
-          </listitem>
-        </itemizedlist>
-      </para>
-      <para>
 	The lifetime of a drm framebuffer is controlled with a reference count;
 	drivers can grab additional references with
 	<function>drm_framebuffer_reference</function> and drop them
@@ -3570,92 +3500,6 @@
       <sect2>
         <title>DPIO</title>
 !Pdrivers/gpu/drm/i915/i915_reg.h DPIO
-	<table id="dpiox2">
-	  <title>Dual channel PHY (VLV/CHV/BXT)</title>
-	  <tgroup cols="8">
-	    <colspec colname="c0" />
-	    <colspec colname="c1" />
-	    <colspec colname="c2" />
-	    <colspec colname="c3" />
-	    <colspec colname="c4" />
-	    <colspec colname="c5" />
-	    <colspec colname="c6" />
-	    <colspec colname="c7" />
-	    <spanspec spanname="ch0" namest="c0" nameend="c3" />
-	    <spanspec spanname="ch1" namest="c4" nameend="c7" />
-	    <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
-	    <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
-	    <spanspec spanname="ch1pcs01" namest="c4" nameend="c5" />
-	    <spanspec spanname="ch1pcs23" namest="c6" nameend="c7" />
-	    <thead>
-	      <row>
-		<entry spanname="ch0">CH0</entry>
-		<entry spanname="ch1">CH1</entry>
-	      </row>
-	    </thead>
-	    <tbody valign="top" align="center">
-	      <row>
-		<entry spanname="ch0">CMN/PLL/REF</entry>
-		<entry spanname="ch1">CMN/PLL/REF</entry>
-	      </row>
-	      <row>
-		<entry spanname="ch0pcs01">PCS01</entry>
-		<entry spanname="ch0pcs23">PCS23</entry>
-		<entry spanname="ch1pcs01">PCS01</entry>
-		<entry spanname="ch1pcs23">PCS23</entry>
-	      </row>
-	      <row>
-		<entry>TX0</entry>
-		<entry>TX1</entry>
-		<entry>TX2</entry>
-		<entry>TX3</entry>
-		<entry>TX0</entry>
-		<entry>TX1</entry>
-		<entry>TX2</entry>
-		<entry>TX3</entry>
-	      </row>
-	      <row>
-		<entry spanname="ch0">DDI0</entry>
-		<entry spanname="ch1">DDI1</entry>
-	      </row>
-	    </tbody>
-	  </tgroup>
-	</table>
-	<table id="dpiox1">
-	  <title>Single channel PHY (CHV/BXT)</title>
-	  <tgroup cols="4">
-	    <colspec colname="c0" />
-	    <colspec colname="c1" />
-	    <colspec colname="c2" />
-	    <colspec colname="c3" />
-	    <spanspec spanname="ch0" namest="c0" nameend="c3" />
-	    <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
-	    <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
-	    <thead>
-	      <row>
-		<entry spanname="ch0">CH0</entry>
-	      </row>
-	    </thead>
-	    <tbody valign="top" align="center">
-	      <row>
-		<entry spanname="ch0">CMN/PLL/REF</entry>
-	      </row>
-	      <row>
-		<entry spanname="ch0pcs01">PCS01</entry>
-		<entry spanname="ch0pcs23">PCS23</entry>
-	      </row>
-	      <row>
-		<entry>TX0</entry>
-		<entry>TX1</entry>
-		<entry>TX2</entry>
-		<entry>TX3</entry>
-	      </row>
-	      <row>
-		<entry spanname="ch0">DDI2</entry>
-	      </row>
-	    </tbody>
-	  </tgroup>
-	</table>
       </sect2>
 
       <sect2>
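The lifetime text that survives above deserves a concrete illustration:
a framebuffer is published by drm_framebuffer_init() and
reference-counted from then on. A minimal sketch, assuming an atomic
plane state is in scope (names from the drm_framebuffer API of this
kernel):

    /* Sketch: hold a framebuffer across use; the final unreference may
     * free it through its ->destroy() hook.
     */
    struct drm_framebuffer *fb = plane->state->fb;  /* borrowed pointer */

    drm_framebuffer_reference(fb);      /* take our own reference */
    /* ... program scanout from fb ... */
    drm_framebuffer_unreference(fb);    /* drop it when done */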
diff --git a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
new file mode 100644
index 0000000..ed5e0a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
@@ -0,0 +1,54 @@
+Etnaviv DRM master device
+=========================
+
+The Etnaviv DRM master device is a virtual device needed to list all
+Vivante GPU cores that comprise the GPU subsystem.
+
+Required properties:
+- compatible: Should be one of
+    "fsl,imx-gpu-subsystem"
+    "marvell,dove-gpu-subsystem"
+- cores: Should contain a list of phandles pointing to Vivante GPU devices
+
+Example:
+
+gpu-subsystem {
+	compatible = "fsl,imx-gpu-subsystem";
+	cores = <&gpu_2d>, <&gpu_3d>;
+};
+
+
+Vivante GPU core devices
+========================
+
+Required properties:
+- compatible: Should be "vivante,gc"
+  A more specific compatible is not needed, as the cores contain chip
+  identification registers at fixed locations, which provide all the
+  necessary information to the driver.
+- reg: should be register base and length as documented in the
+  datasheet
+- interrupts: Should contain the core's interrupt line
+- clocks: should contain one clock for each entry in clock-names
+  see Documentation/devicetree/bindings/clock/clock-bindings.txt
+- clock-names:
+   - "bus":    AXI/register clock
+   - "core":   GPU core clock
+   - "shader": Shader clock (only required if GPU has feature PIPE_3D)
+
+Optional properties:
+- power-domains: a power domain consumer specifier according to
+  Documentation/devicetree/bindings/power/power_domain.txt
+
+Example:
+
+gpu_3d: gpu@00130000 {
+	compatible = "vivante,gc";
+	reg = <0x00130000 0x4000>;
+	interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&clks IMX6QDL_CLK_GPU3D_AXI>,
+	         <&clks IMX6QDL_CLK_GPU3D_CORE>,
+	         <&clks IMX6QDL_CLK_GPU3D_SHADER>;
+	clock-names = "bus", "core", "shader";
+	power-domains = <&gpc 1>;
+};
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
new file mode 100644
index 0000000..50be5e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
@@ -0,0 +1,7 @@
+BOE Corporation 8.0" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "boe,tv080wum-nl0"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
new file mode 100644
index 0000000..6497446
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
@@ -0,0 +1,7 @@
+Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,g121x1-l03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
new file mode 100644
index 0000000..a8e940f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
@@ -0,0 +1,7 @@
+Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
+
+Required properties:
+- compatible: should be "kyo,tcg121xglp"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
new file mode 100644
index 0000000..37dedf6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
@@ -0,0 +1,20 @@
+Panasonic 10" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "panasonic,vvx10f034n00"
+- reg: DSI virtual channel of the peripheral
+- power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+	mdss_dsi@fd922800 {
+		panel@0 {
+			compatible = "panasonic,vvx10f034n00";
+			reg = <0>;
+			power-supply = <&vreg_vsp>;
+			backlight = <&lp8566_wled>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
new file mode 100644
index 0000000..0fbdab8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
@@ -0,0 +1,7 @@
+QiaoDian XianShi Corporation 4.3" TFT LCD panel
+
+Required properties:
+- compatible: should be "qiaodian,qd43003c0-40"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt
new file mode 100644
index 0000000..3770a11
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,ls043t1le01.txt
@@ -0,0 +1,22 @@
+Sharp Microelectronics 4.3" qHD TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,ls043t1le01-qhd"
+- reg: DSI virtual channel of the peripheral
+- power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+- reset-gpios: a GPIO spec for the reset pin
+
+Example:
+
+	mdss_dsi@fd922800 {
+		panel@0 {
+			compatible = "sharp,ls043t1le01-qhd";
+			reg = <0>;
+			power-supply = <&pm8941_l22>;
+			backlight = <&pm8941_wled>;
+			reset-gpios = <&pm8941_gpios 19 GPIO_ACTIVE_HIGH>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 55df1d4..b123731 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -33,6 +33,7 @@
 avago	Avago Technologies
 avic	Shanghai AVIC Optoelectronics Co., Ltd.
 axis	Axis Communications AB
+boe	BOE Technology Group Co., Ltd.
 bosch	Bosch Sensortec GmbH
 boundary	Boundary Devices Inc.
 brcm	Broadcom Corporation
@@ -123,6 +124,7 @@
 karo	Ka-Ro electronics GmbH
 keymile	Keymile GmbH
 kinetic Kinetic Technologies
+kyo	Kyocera Corporation
 lacie	LaCie
 lantiq	Lantiq Semiconductor
 lenovo	Lenovo Group Ltd.
@@ -180,6 +182,7 @@
 qcom	Qualcomm Technologies, Inc
 qemu	QEMU, a generic and open source machine emulator and virtualizer
 qi	Qi Hardware
+qiaodian	QiaoDian XianShi Corporation
 qnap	QNAP Systems, Inc.
 radxa	Radxa
 raidsonic	RaidSonic Technology GmbH
@@ -238,6 +241,7 @@
 variscite	Variscite Ltd.
 via	VIA Technologies, Inc.
 virtio	Virtual I/O Device Specification, developed by the OASIS consortium
+vivante	Vivante Corporation
 voipac	Voipac Technologies s.r.o.
 wexler	Wexler
 winbond Winbond Electronics corp.
diff --git a/MAINTAINERS b/MAINTAINERS
index 69c8a9c..9a68ea9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3743,6 +3743,15 @@
 F:	drivers/gpu/drm/sti
 F:	Documentation/devicetree/bindings/display/st,stih4xx.txt
 
+DRM DRIVERS FOR VIVANTE GPU IP
+M:	Lucas Stach <l.stach@pengutronix.de>
+R:	Russell King <linux+etnaviv@arm.linux.org.uk>
+R:	Christian Gmeiner <christian.gmeiner@gmail.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+F:	drivers/gpu/drm/etnaviv
+F:	Documentation/devicetree/bindings/display/etnaviv
+
 DSBR100 USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
 L:	linux-media@vger.kernel.org
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c4bf9a1..b02ac62 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -266,3 +266,5 @@
 source "drivers/gpu/drm/imx/Kconfig"
 
 source "drivers/gpu/drm/vc4/Kconfig"
+
+source "drivers/gpu/drm/etnaviv/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1e9ff4c..f858aa2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -75,3 +75,4 @@
 obj-y			+= panel/
 obj-y			+= bridge/
 obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
+obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 9bdc28c..4123680 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -1074,7 +1074,7 @@
 	return 0;
 }
 
-static struct drm_crtc_funcs armada_crtc_funcs = {
+static const struct drm_crtc_funcs armada_crtc_funcs = {
 	.cursor_set	= armada_drm_crtc_cursor_set,
 	.cursor_move	= armada_drm_crtc_cursor_move,
 	.destroy	= armada_drm_crtc_destroy,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 8168954..a45b32b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -333,6 +333,10 @@
 		.data = &atmel_hlcdc_dc_at91sam9x5,
 	},
 	{
+		.compatible = "atmel,sama5d2-hlcdc",
+		.data = &atmel_hlcdc_dc_sama5d4,
+	},
+	{
 		.compatible = "atmel,sama5d3-hlcdc",
 		.data = &atmel_hlcdc_dc_sama5d3,
 	},
@@ -342,6 +346,7 @@
 	},
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match);
 
 int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
 			      struct drm_display_mode *mode)
@@ -733,10 +738,6 @@
 	if (!ddev)
 		return -ENOMEM;
 
-	ret = drm_dev_set_unique(ddev, dev_name(ddev->dev));
-	if (ret)
-		goto err_unref;
-
 	ret = atmel_hlcdc_dc_load(ddev);
 	if (ret)
 		goto err_unref;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index d112900..0f7ec01 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -146,7 +146,7 @@
 			   cfg);
 }
 
-static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
 	.mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
 	.mode_set = atmel_hlcdc_rgb_encoder_mode_set,
 	.disable = atmel_hlcdc_panel_encoder_disable,
@@ -192,7 +192,7 @@
 	return &rgb->encoder;
 }
 
-static struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
 	.get_modes = atmel_hlcdc_panel_get_modes,
 	.mode_valid = atmel_hlcdc_rgb_mode_valid,
 	.best_encoder = atmel_hlcdc_rgb_best_encoder,
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index a88be6d..2849f1b 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -119,7 +119,7 @@
 	bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
 	if (event) {
 		spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
-		drm_send_vblank_event(bochs->dev, -1, event);
+		drm_crtc_send_vblank_event(crtc, event);
 		spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
 	}
 	return 0;
@@ -245,13 +245,13 @@
 	return connector_status_connected;
 }
 
-struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
 	.get_modes = bochs_connector_get_modes,
 	.mode_valid = bochs_connector_mode_valid,
 	.best_encoder = bochs_connector_best_encoder,
 };
 
-struct drm_connector_funcs bochs_connector_connector_funcs = {
+static const struct drm_connector_funcs bochs_connector_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = bochs_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -283,7 +283,7 @@
 	bochs->dev->mode_config.preferred_depth = 24;
 	bochs->dev->mode_config.prefer_shadow = 0;
 
-	bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs;
+	bochs->dev->mode_config.funcs = &bochs_mode_funcs;
 
 	bochs_crtc_init(bochs->dev);
 	bochs_encoder_init(bochs->dev);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 6dddd39..27e2022 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -22,7 +22,6 @@
 	  Designware HDMI block.  This is used in conjunction with
 	  the i.MX6 HDMI driver.
 
-
 config DRM_NXP_PTN3460
 	tristate "NXP PTN3460 DP/LVDS bridge"
 	depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index d4e28be..f13c33d 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,6 +1,6 @@
 ccflags-y := -Iinclude/drm
 
-obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
-obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw_hdmi-ahb-audio.o
+obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
+obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
similarity index 99%
rename from drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c
rename to drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
index 59f630f..122bb01 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
@@ -21,7 +21,7 @@
 #include <sound/pcm_drm_eld.h>
 #include <sound/pcm_iec958.h>
 
-#include "dw_hdmi-audio.h"
+#include "dw-hdmi-audio.h"
 
 #define DRIVER_NAME "dw-hdmi-ahb-audio"
 
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-audio.h b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
similarity index 100%
rename from drivers/gpu/drm/bridge/dw_hdmi-audio.h
rename to drivers/gpu/drm/bridge/dw-hdmi-audio.h
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
similarity index 99%
rename from drivers/gpu/drm/bridge/dw_hdmi.c
rename to drivers/gpu/drm/bridge/dw-hdmi.c
index 56de9f1..77cafa9 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -27,8 +27,8 @@
 #include <drm/drm_encoder_slave.h>
 #include <drm/bridge/dw_hdmi.h>
 
-#include "dw_hdmi.h"
-#include "dw_hdmi-audio.h"
+#include "dw-hdmi.h"
+#include "dw-hdmi-audio.h"
 
 #define HDMI_EDID_LEN		512
 
@@ -1514,7 +1514,7 @@
 	mutex_unlock(&hdmi->mutex);
 }
 
-static struct drm_connector_funcs dw_hdmi_connector_funcs = {
+static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = dw_hdmi_connector_detect,
@@ -1522,13 +1522,13 @@
 	.force = dw_hdmi_connector_force,
 };
 
-static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
 	.get_modes = dw_hdmi_connector_get_modes,
 	.mode_valid = dw_hdmi_connector_mode_valid,
 	.best_encoder = dw_hdmi_connector_best_encoder,
 };
 
-static struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
 	.enable = dw_hdmi_bridge_enable,
 	.disable = dw_hdmi_bridge_disable,
 	.pre_enable = dw_hdmi_bridge_nop,
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h
similarity index 100%
rename from drivers/gpu/drm/bridge/dw_hdmi.h
rename to drivers/gpu/drm/bridge/dw-hdmi.h
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 0ffa3a6..7ecd59f 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -242,7 +242,7 @@
 	return ptn_bridge->bridge.encoder;
 }
 
-static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
 	.get_modes = ptn3460_get_modes,
 	.best_encoder = ptn3460_best_encoder,
 };
@@ -258,7 +258,7 @@
 	drm_connector_cleanup(connector);
 }
 
-static struct drm_connector_funcs ptn3460_connector_funcs = {
+static const struct drm_connector_funcs ptn3460_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = ptn3460_detect,
@@ -299,7 +299,7 @@
 	return ret;
 }
 
-static struct drm_bridge_funcs ptn3460_bridge_funcs = {
+static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
 	.pre_enable = ptn3460_pre_enable,
 	.enable = ptn3460_enable,
 	.disable = ptn3460_disable,
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 276719e..4a02854 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -533,12 +533,12 @@
 	kfree(connector);
 }
 
-struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
 	.get_modes = cirrus_vga_get_modes,
 	.best_encoder = cirrus_connector_best_encoder,
 };
 
-struct drm_connector_funcs cirrus_vga_connector_funcs = {
+static const struct drm_connector_funcs cirrus_vga_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = cirrus_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 63f925b..268d37f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -88,8 +88,7 @@
 
 static bool
 check_pending_encoder_assignment(struct drm_atomic_state *state,
-				 struct drm_encoder *new_encoder,
-				 struct drm_connector *new_connector)
+				 struct drm_encoder *new_encoder)
 {
 	struct drm_connector *connector;
 	struct drm_connector_state *conn_state;
@@ -257,7 +256,7 @@
 		return 0;
 	}
 
-	if (!check_pending_encoder_assignment(state, new_encoder, connector)) {
+	if (!check_pending_encoder_assignment(state, new_encoder)) {
 		DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
 				 connector->base.id,
 				 connector->name);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 809959d..3b6627d 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1673,6 +1673,7 @@
 		if (mgr->proposed_vcpis[i]) {
 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
 		} else {
 			port = NULL;
 			req_payload.num_slots = 0;
@@ -1688,6 +1689,7 @@
 			if (req_payload.num_slots) {
 				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
 				mgr->payloads[i].num_slots = req_payload.num_slots;
+				mgr->payloads[i].vcpi = req_payload.vcpi;
 			} else if (mgr->payloads[i].num_slots) {
 				mgr->payloads[i].num_slots = 0;
 				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
@@ -1823,7 +1825,7 @@
 {
 	struct drm_dp_sideband_msg_reply_body reply;
 
-	reply.reply_type = 1;
+	reply.reply_type = 0;
 	reply.req_type = req_type;
 	drm_dp_encode_sideband_reply(&reply, msg);
 	return 0;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7dd6728..bf934cde 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -633,8 +633,17 @@
 		}
 	}
 
+	if (parent) {
+		ret = drm_dev_set_unique(dev, dev_name(parent));
+		if (ret)
+			goto err_setunique;
+	}
+
 	return dev;
 
+err_setunique:
+	if (drm_core_check_feature(dev, DRIVER_GEM))
+		drm_gem_destroy(dev);
 err_ctxbitmap:
 	drm_legacy_ctxbitmap_cleanup(dev);
 	drm_ht_remove(&dev->map_hash);
@@ -797,23 +806,18 @@
 /**
  * drm_dev_set_unique - Set the unique name of a DRM device
  * @dev: device of which to set the unique name
- * @fmt: format string for unique name
+ * @name: unique name
  *
- * Sets the unique name of a DRM device using the specified format string and
- * a variable list of arguments. Drivers can use this at driver probe time if
- * the unique name of the devices they drive is static.
+ * Sets the unique name of a DRM device using the specified string. Drivers
+ * can use this at driver probe time if the unique name of the devices they
+ * drive is static.
  *
  * Return: 0 on success or a negative error code on failure.
  */
-int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
+int drm_dev_set_unique(struct drm_device *dev, const char *name)
 {
-	va_list ap;
-
 	kfree(dev->unique);
-
-	va_start(ap, fmt);
-	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
-	va_end(ap);
+	dev->unique = kstrdup(name, GFP_KERNEL);
 
 	return dev->unique ? 0 : -ENOMEM;
 }
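Since drm_dev_alloc() now derives the unique name from dev_name(parent),
drivers with a real parent device can simply drop their explicit call,
as the atmel-hlcdc hunk above does. Only parentless virtual devices
still need to set a name by hand; a minimal sketch (driver struct,
device name, and error label are hypothetical, error handling trimmed):

    struct drm_device *ddev;
    int ret;

    ddev = drm_dev_alloc(&my_driver, NULL);     /* no parent device */
    if (!ddev)
            return -ENOMEM;

    ret = drm_dev_set_unique(ddev, "my-virtual-device");
    if (ret)
            goto err_unref;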
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index d18b88b..e862907 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -124,7 +124,7 @@
  * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
  */
 
-static inline struct drm_encoder_slave_funcs *
+static inline const struct drm_encoder_slave_funcs *
 get_slave_funcs(struct drm_encoder *enc)
 {
 	return to_encoder_slave(enc)->slave_funcs;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index b7d5b84..5543fa8 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -266,7 +266,7 @@
 	fbi = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(fbi)) {
 		ret = PTR_ERR(fbi);
-		goto err_drm_gem_cma_free_object;
+		goto err_gem_free_object;
 	}
 
 	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
@@ -299,8 +299,8 @@
 
 err_fb_info_destroy:
 	drm_fb_helper_release_fbi(helper);
-err_drm_gem_cma_free_object:
-	drm_gem_cma_free_object(&obj->base);
+err_gem_free_object:
+	dev->driver->gem_free_object(&obj->base);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0f7b00b..e5df53b 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -121,7 +121,7 @@
 	return cma_obj;
 
 error:
-	drm_gem_cma_free_object(&cma_obj->base);
+	drm->driver->gem_free_object(&cma_obj->base);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -171,7 +171,7 @@
 	return cma_obj;
 
 err_handle_create:
-	drm_gem_cma_free_object(gem_obj);
+	drm->driver->gem_free_object(gem_obj);
 
 	return ERR_PTR(ret);
 }
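Routing frees through dev->driver->gem_free_object instead of calling
drm_gem_cma_free_object() directly lets a driver interpose on the CMA
free path. A hedged sketch of such an override (driver names are
hypothetical):

    /* Sketch: driver-specific teardown before delegating to the stock
     * CMA free; the helpers above now reach this hook on their error
     * paths.
     */
    static void my_gem_free_object(struct drm_gem_object *obj)
    {
            /* ... undo driver-private mappings, bookkeeping, etc. ... */
            drm_gem_cma_free_object(obj);
    }

    static struct drm_driver my_driver = {
            .gem_free_object = my_gem_free_object,
            /* ... */
    };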
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 2d5ca8ee..6e6a9c5 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -365,6 +365,44 @@
 }
 EXPORT_SYMBOL(mipi_dsi_create_packet);
 
+/**
+ * mipi_dsi_shutdown_peripheral() - sends a Shutdown Peripheral command
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi)
+{
+	struct mipi_dsi_msg msg = {
+		.channel = dsi->channel,
+		.type = MIPI_DSI_SHUTDOWN_PERIPHERAL,
+		.tx_buf = (u8 [2]) { 0, 0 },
+		.tx_len = 2,
+	};
+
+	return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);
+
+/**
+ * mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
+{
+	struct mipi_dsi_msg msg = {
+		.channel = dsi->channel,
+		.type = MIPI_DSI_TURN_ON_PERIPHERAL,
+		.tx_buf = (u8 [2]) { 0, 0 },
+		.tx_len = 2,
+	};
+
+	return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
+
 /*
  * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
  *    the payload in a long packet transmitted from the peripheral back to the
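A usage sketch for the two new helpers, from a hypothetical DSI panel
driver (struct and function names assumed, error handling trimmed):

    struct my_panel {
            struct mipi_dsi_device *dsi;
    };

    static int my_panel_enable(struct my_panel *panel)
    {
            /* DSI Turn On Peripheral packet, as added above */
            return mipi_dsi_turn_on_peripheral(panel->dsi);
    }

    static int my_panel_disable(struct my_panel *panel)
    {
            /* DSI Shutdown Peripheral packet */
            return mipi_dsi_shutdown_peripheral(panel->dsi);
    }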
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 5a8a78d..20775c0 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,10 +553,10 @@
  * drivers/video/fbmon.c
  *
  * Standard GTF parameters:
- * M = 600
- * C = 40
- * K = 128
- * J = 20
+ *     M = 600
+ *     C = 40
+ *     K = 128
+ *     J = 20
  *
  * Returns:
  * The modeline based on the GTF algorithm stored in a drm_display_mode object.
@@ -1244,7 +1244,7 @@
  * This uses the same parameters as the fb modedb.c, except for an extra
  * force-enable, force-enable-digital and force-disable bit at the end:
  *
- *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
  *
  * The intermediate drm_cmdline_mode structure is required to store additional
  * options from the command line modline like the force-enable/disable flag.
@@ -1523,4 +1523,4 @@
 
 out:
 	return ret;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c2f5971..e3a4adf 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -40,17 +40,15 @@
  * The basic usage pattern is to:
  *
  *     drm_modeset_acquire_init(&ctx)
- *   retry:
+ *     retry:
  *     foreach (lock in random_ordered_set_of_locks) {
- *       ret = drm_modeset_lock(lock, &ctx)
- *       if (ret == -EDEADLK) {
- *          drm_modeset_backoff(&ctx);
- *          goto retry;
- *       }
+ *         ret = drm_modeset_lock(lock, &ctx)
+ *         if (ret == -EDEADLK) {
+ *             drm_modeset_backoff(&ctx);
+ *             goto retry;
+ *         }
  *     }
- *
  *     ... do stuff ...
- *
  *     drm_modeset_drop_locks(&ctx);
  *     drm_modeset_acquire_fini(&ctx);
  */
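Rendered as real C, the comment's pattern becomes the following (a
minimal sketch; which locks are taken, and in what order, is up to the
caller, and a crtc pointer is assumed to be in scope):

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);
retry:
    ret = drm_modeset_lock(&crtc->mutex, &ctx);
    if (ret == -EDEADLK) {
            drm_modeset_backoff(&ctx);     /* drop all, sleep, retry */
            goto retry;
    }
    /* ... do stuff under the lock(s) ... */
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);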
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9f935f5..27aa718 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -313,19 +313,15 @@
  *
  * Export callbacks:
  *
- *  - @gem_prime_pin (optional): prepare a GEM object for exporting
- *
- *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
- *
- *  - @gem_prime_vmap: vmap a buffer exported by your driver
- *
- *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
- *
- *  - @gem_prime_mmap (optional): mmap a buffer exported by your driver
+ *  * @gem_prime_pin (optional): prepare a GEM object for exporting
+ *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
+ *  * @gem_prime_vmap: vmap a buffer exported by your driver
+ *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
+ *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
  *
  * Import callback:
  *
- *  - @gem_prime_import_sg_table (import): produce a GEM object from another
+ *  * @gem_prime_import_sg_table (import): produce a GEM object from another
  *    driver's scatter/gather table
  */
 
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
new file mode 100644
index 0000000..2cde7a5
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -0,0 +1,20 @@
+
+config DRM_ETNAVIV
+	tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
+	depends on DRM
+	depends on ARCH_MXC || ARCH_DOVE
+	select SHMEM
+	select TMPFS
+	select IOMMU_API
+	select IOMMU_SUPPORT
+	select WANT_DEV_COREDUMP
+	help
+	  DRM driver for Vivante GPUs.
+
+config DRM_ETNAVIV_REGISTER_LOGGING
+	bool "enable ETNAVIV register logging"
+	depends on DRM_ETNAVIV
+	help
+	  Compile in support for logging register reads/writes in a format
+	  that can be parsed by the envytools demsm tool.  If enabled,
+	  register logging can be switched on via the etnaviv.reglog=y
+	  module parameter.
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
new file mode 100644
index 0000000..1086e98
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -0,0 +1,14 @@
+etnaviv-y := \
+	etnaviv_buffer.o \
+	etnaviv_cmd_parser.o \
+	etnaviv_drv.o \
+	etnaviv_dump.o \
+	etnaviv_gem_prime.o \
+	etnaviv_gem_submit.o \
+	etnaviv_gem.o \
+	etnaviv_gpu.o \
+	etnaviv_iommu_v2.o \
+	etnaviv_iommu.o \
+	etnaviv_mmu.o
+
+obj-$(CONFIG_DRM_ETNAVIV)	+= etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
new file mode 100644
index 0000000..8c44ba9
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
@@ -0,0 +1,218 @@
+#ifndef CMDSTREAM_XML
+#define CMDSTREAM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- cmdstream.xml (  12589 bytes, from 2014-02-17 14:57:56)
+- common.xml    (  18437 bytes, from 2015-03-25 11:27:41)
+
+Copyright (C) 2014
+*/
+
+
+#define FE_OPCODE_LOAD_STATE					0x00000001
+#define FE_OPCODE_END						0x00000002
+#define FE_OPCODE_NOP						0x00000003
+#define FE_OPCODE_DRAW_2D					0x00000004
+#define FE_OPCODE_DRAW_PRIMITIVES				0x00000005
+#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES			0x00000006
+#define FE_OPCODE_WAIT						0x00000007
+#define FE_OPCODE_LINK						0x00000008
+#define FE_OPCODE_STALL						0x00000009
+#define FE_OPCODE_CALL						0x0000000a
+#define FE_OPCODE_RETURN					0x0000000b
+#define FE_OPCODE_CHIP_SELECT					0x0000000d
+#define PRIMITIVE_TYPE_POINTS					0x00000001
+#define PRIMITIVE_TYPE_LINES					0x00000002
+#define PRIMITIVE_TYPE_LINE_STRIP				0x00000003
+#define PRIMITIVE_TYPE_TRIANGLES				0x00000004
+#define PRIMITIVE_TYPE_TRIANGLE_STRIP				0x00000005
+#define PRIMITIVE_TYPE_TRIANGLE_FAN				0x00000006
+#define PRIMITIVE_TYPE_LINE_LOOP				0x00000007
+#define PRIMITIVE_TYPE_QUADS					0x00000008
+#define VIV_FE_LOAD_STATE					0x00000000
+
+#define VIV_FE_LOAD_STATE_HEADER				0x00000000
+#define VIV_FE_LOAD_STATE_HEADER_OP__MASK			0xf8000000
+#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT			27
+#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE			0x08000000
+#define VIV_FE_LOAD_STATE_HEADER_FIXP				0x04000000
+#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK			0x03ff0000
+#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT			16
+#define VIV_FE_LOAD_STATE_HEADER_COUNT(x)			(((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK)
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK			0x0000ffff
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT			0
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x)			(((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK)
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR			2
+
+#define VIV_FE_END						0x00000000
+
+#define VIV_FE_END_HEADER					0x00000000
+#define VIV_FE_END_HEADER_EVENT_ID__MASK			0x0000001f
+#define VIV_FE_END_HEADER_EVENT_ID__SHIFT			0
+#define VIV_FE_END_HEADER_EVENT_ID(x)				(((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK)
+#define VIV_FE_END_HEADER_EVENT_ENABLE				0x00000100
+#define VIV_FE_END_HEADER_OP__MASK				0xf8000000
+#define VIV_FE_END_HEADER_OP__SHIFT				27
+#define VIV_FE_END_HEADER_OP_END				0x10000000
+
+#define VIV_FE_NOP						0x00000000
+
+#define VIV_FE_NOP_HEADER					0x00000000
+#define VIV_FE_NOP_HEADER_OP__MASK				0xf8000000
+#define VIV_FE_NOP_HEADER_OP__SHIFT				27
+#define VIV_FE_NOP_HEADER_OP_NOP				0x18000000
+
+#define VIV_FE_DRAW_2D						0x00000000
+
+#define VIV_FE_DRAW_2D_HEADER					0x00000000
+#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK			0x0000ff00
+#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT			8
+#define VIV_FE_DRAW_2D_HEADER_COUNT(x)				(((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK)
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK			0x07ff0000
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT			16
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x)			(((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK)
+#define VIV_FE_DRAW_2D_HEADER_OP__MASK				0xf8000000
+#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT				27
+#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D			0x20000000
+
+#define VIV_FE_DRAW_2D_TOP_LEFT					0x00000008
+#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK				0x0000ffff
+#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT			0
+#define VIV_FE_DRAW_2D_TOP_LEFT_X(x)				(((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK)
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK				0xffff0000
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT			16
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x)				(((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK)
+
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT				0x0000000c
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK			0x0000ffff
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT			0
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x)			(((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK)
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK			0xffff0000
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT			16
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x)			(((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK)
+
+#define VIV_FE_DRAW_PRIMITIVES					0x00000000
+
+#define VIV_FE_DRAW_PRIMITIVES_HEADER				0x00000000
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK			0xf8000000
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT			27
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES	0x28000000
+
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND				0x00000004
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK		0x000000ff
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT		0
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x)			(((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK)
+
+#define VIV_FE_DRAW_PRIMITIVES_START				0x00000008
+
+#define VIV_FE_DRAW_PRIMITIVES_COUNT				0x0000000c
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES				0x00000000
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER			0x00000000
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK		0xf8000000
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT		27
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES	0x30000000
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND			0x00000004
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK	0x000000ff
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT	0
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x)		(((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK)
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START			0x00000008
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT			0x0000000c
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET			0x00000010
+
+#define VIV_FE_WAIT						0x00000000
+
+#define VIV_FE_WAIT_HEADER					0x00000000
+#define VIV_FE_WAIT_HEADER_DELAY__MASK				0x0000ffff
+#define VIV_FE_WAIT_HEADER_DELAY__SHIFT				0
+#define VIV_FE_WAIT_HEADER_DELAY(x)				(((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK)
+#define VIV_FE_WAIT_HEADER_OP__MASK				0xf8000000
+#define VIV_FE_WAIT_HEADER_OP__SHIFT				27
+#define VIV_FE_WAIT_HEADER_OP_WAIT				0x38000000
+
+#define VIV_FE_LINK						0x00000000
+
+#define VIV_FE_LINK_HEADER					0x00000000
+#define VIV_FE_LINK_HEADER_PREFETCH__MASK			0x0000ffff
+#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT			0
+#define VIV_FE_LINK_HEADER_PREFETCH(x)				(((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK)
+#define VIV_FE_LINK_HEADER_OP__MASK				0xf8000000
+#define VIV_FE_LINK_HEADER_OP__SHIFT				27
+#define VIV_FE_LINK_HEADER_OP_LINK				0x40000000
+
+#define VIV_FE_LINK_ADDRESS					0x00000004
+
+#define VIV_FE_STALL						0x00000000
+
+#define VIV_FE_STALL_HEADER					0x00000000
+#define VIV_FE_STALL_HEADER_OP__MASK				0xf8000000
+#define VIV_FE_STALL_HEADER_OP__SHIFT				27
+#define VIV_FE_STALL_HEADER_OP_STALL				0x48000000
+
+#define VIV_FE_STALL_TOKEN					0x00000004
+#define VIV_FE_STALL_TOKEN_FROM__MASK				0x0000001f
+#define VIV_FE_STALL_TOKEN_FROM__SHIFT				0
+#define VIV_FE_STALL_TOKEN_FROM(x)				(((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK)
+#define VIV_FE_STALL_TOKEN_TO__MASK				0x00001f00
+#define VIV_FE_STALL_TOKEN_TO__SHIFT				8
+#define VIV_FE_STALL_TOKEN_TO(x)				(((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK)
+
+#define VIV_FE_CALL						0x00000000
+
+#define VIV_FE_CALL_HEADER					0x00000000
+#define VIV_FE_CALL_HEADER_PREFETCH__MASK			0x0000ffff
+#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT			0
+#define VIV_FE_CALL_HEADER_PREFETCH(x)				(((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK)
+#define VIV_FE_CALL_HEADER_OP__MASK				0xf8000000
+#define VIV_FE_CALL_HEADER_OP__SHIFT				27
+#define VIV_FE_CALL_HEADER_OP_CALL				0x50000000
+
+#define VIV_FE_CALL_ADDRESS					0x00000004
+
+#define VIV_FE_CALL_RETURN_PREFETCH				0x00000008
+
+#define VIV_FE_CALL_RETURN_ADDRESS				0x0000000c
+
+#define VIV_FE_RETURN						0x00000000
+
+#define VIV_FE_RETURN_HEADER					0x00000000
+#define VIV_FE_RETURN_HEADER_OP__MASK				0xf8000000
+#define VIV_FE_RETURN_HEADER_OP__SHIFT				27
+#define VIV_FE_RETURN_HEADER_OP_RETURN				0x58000000
+
+#define VIV_FE_CHIP_SELECT					0x00000000
+
+#define VIV_FE_CHIP_SELECT_HEADER				0x00000000
+#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK			0xf8000000
+#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT			27
+#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT		0x68000000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15			0x00008000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14			0x00004000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13			0x00002000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12			0x00001000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11			0x00000800
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10			0x00000400
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9			0x00000200
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8			0x00000100
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7			0x00000080
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6			0x00000040
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5			0x00000020
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4			0x00000010
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3			0x00000008
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2			0x00000004
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1			0x00000002
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0			0x00000001
+
+
+#endif /* CMDSTREAM_XML */
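These autogenerated field macros compose command words by shifting and
masking; for example, a LOAD_STATE header that writes one value at
register offset 0x0380 (an illustration built only from the macros
above; register offsets are in 32-bit units, hence the __SHR shift):

    u32 header = VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                 VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                 VIV_FE_LOAD_STATE_HEADER_OFFSET(0x0380 >>
                        VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR);
    /* header == 0x080100e0 */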
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
new file mode 100644
index 0000000..9e585d5
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -0,0 +1,249 @@
+#ifndef COMMON_XML
+#define COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state_vg.xml (   5973 bytes, from 2015-03-25 11:26:01)
+- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
+
+Copyright (C) 2015
+*/
+
+
+#define PIPE_ID_PIPE_3D						0x00000000
+#define PIPE_ID_PIPE_2D						0x00000001
+#define SYNC_RECIPIENT_FE					0x00000001
+#define SYNC_RECIPIENT_RA					0x00000005
+#define SYNC_RECIPIENT_PE					0x00000007
+#define SYNC_RECIPIENT_DE					0x0000000b
+#define SYNC_RECIPIENT_VG					0x0000000f
+#define SYNC_RECIPIENT_TESSELATOR				0x00000010
+#define SYNC_RECIPIENT_VG2					0x00000011
+#define SYNC_RECIPIENT_TESSELATOR2				0x00000012
+#define SYNC_RECIPIENT_VG3					0x00000013
+#define SYNC_RECIPIENT_TESSELATOR3				0x00000014
+#define ENDIAN_MODE_NO_SWAP					0x00000000
+#define ENDIAN_MODE_SWAP_16					0x00000001
+#define ENDIAN_MODE_SWAP_32					0x00000002
+#define chipModel_GC300						0x00000300
+#define chipModel_GC320						0x00000320
+#define chipModel_GC350						0x00000350
+#define chipModel_GC355						0x00000355
+#define chipModel_GC400						0x00000400
+#define chipModel_GC410						0x00000410
+#define chipModel_GC420						0x00000420
+#define chipModel_GC450						0x00000450
+#define chipModel_GC500						0x00000500
+#define chipModel_GC530						0x00000530
+#define chipModel_GC600						0x00000600
+#define chipModel_GC700						0x00000700
+#define chipModel_GC800						0x00000800
+#define chipModel_GC860						0x00000860
+#define chipModel_GC880						0x00000880
+#define chipModel_GC1000					0x00001000
+#define chipModel_GC2000					0x00002000
+#define chipModel_GC2100					0x00002100
+#define chipModel_GC4000					0x00004000
+#define RGBA_BITS_R						0x00000001
+#define RGBA_BITS_G						0x00000002
+#define RGBA_BITS_B						0x00000004
+#define RGBA_BITS_A						0x00000008
+#define chipFeatures_FAST_CLEAR					0x00000001
+#define chipFeatures_SPECIAL_ANTI_ALIASING			0x00000002
+#define chipFeatures_PIPE_3D					0x00000004
+#define chipFeatures_DXT_TEXTURE_COMPRESSION			0x00000008
+#define chipFeatures_DEBUG_MODE					0x00000010
+#define chipFeatures_Z_COMPRESSION				0x00000020
+#define chipFeatures_YUV420_SCALER				0x00000040
+#define chipFeatures_MSAA					0x00000080
+#define chipFeatures_DC						0x00000100
+#define chipFeatures_PIPE_2D					0x00000200
+#define chipFeatures_ETC1_TEXTURE_COMPRESSION			0x00000400
+#define chipFeatures_FAST_SCALER				0x00000800
+#define chipFeatures_HIGH_DYNAMIC_RANGE				0x00001000
+#define chipFeatures_YUV420_TILER				0x00002000
+#define chipFeatures_MODULE_CG					0x00004000
+#define chipFeatures_MIN_AREA					0x00008000
+#define chipFeatures_NO_EARLY_Z					0x00010000
+#define chipFeatures_NO_422_TEXTURE				0x00020000
+#define chipFeatures_BUFFER_INTERLEAVING			0x00040000
+#define chipFeatures_BYTE_WRITE_2D				0x00080000
+#define chipFeatures_NO_SCALER					0x00100000
+#define chipFeatures_YUY2_AVERAGING				0x00200000
+#define chipFeatures_HALF_PE_CACHE				0x00400000
+#define chipFeatures_HALF_TX_CACHE				0x00800000
+#define chipFeatures_YUY2_RENDER_TARGET				0x01000000
+#define chipFeatures_MEM32					0x02000000
+#define chipFeatures_PIPE_VG					0x04000000
+#define chipFeatures_VGTS					0x08000000
+#define chipFeatures_FE20					0x10000000
+#define chipFeatures_BYTE_WRITE_3D				0x20000000
+#define chipFeatures_RS_YUV_TARGET				0x40000000
+#define chipFeatures_32_BIT_INDICES				0x80000000
+#define chipMinorFeatures0_FLIP_Y				0x00000001
+#define chipMinorFeatures0_DUAL_RETURN_BUS			0x00000002
+#define chipMinorFeatures0_ENDIANNESS_CONFIG			0x00000004
+#define chipMinorFeatures0_TEXTURE_8K				0x00000008
+#define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER		0x00000010
+#define chipMinorFeatures0_SPECIAL_MSAA_LOD			0x00000020
+#define chipMinorFeatures0_FAST_CLEAR_FLUSH			0x00000040
+#define chipMinorFeatures0_2DPE20				0x00000080
+#define chipMinorFeatures0_CORRECT_AUTO_DISABLE			0x00000100
+#define chipMinorFeatures0_RENDERTARGET_8K			0x00000200
+#define chipMinorFeatures0_2BITPERTILE				0x00000400
+#define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED	0x00000800
+#define chipMinorFeatures0_SUPER_TILED				0x00001000
+#define chipMinorFeatures0_VG_20				0x00002000
+#define chipMinorFeatures0_TS_EXTENDED_COMMANDS			0x00004000
+#define chipMinorFeatures0_COMPRESSION_FIFO_FIXED		0x00008000
+#define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL			0x00010000
+#define chipMinorFeatures0_VG_FILTER				0x00020000
+#define chipMinorFeatures0_VG_21				0x00040000
+#define chipMinorFeatures0_SHADER_HAS_W				0x00080000
+#define chipMinorFeatures0_HAS_SQRT_TRIG			0x00100000
+#define chipMinorFeatures0_MORE_MINOR_FEATURES			0x00200000
+#define chipMinorFeatures0_MC20					0x00400000
+#define chipMinorFeatures0_MSAA_SIDEBAND			0x00800000
+#define chipMinorFeatures0_BUG_FIXES0				0x01000000
+#define chipMinorFeatures0_VAA					0x02000000
+#define chipMinorFeatures0_BYPASS_IN_MSAA			0x04000000
+#define chipMinorFeatures0_HZ					0x08000000
+#define chipMinorFeatures0_NEW_TEXTURE				0x10000000
+#define chipMinorFeatures0_2D_A8_TARGET				0x20000000
+#define chipMinorFeatures0_CORRECT_STENCIL			0x40000000
+#define chipMinorFeatures0_ENHANCE_VR				0x80000000
+#define chipMinorFeatures1_RSUV_SWIZZLE				0x00000001
+#define chipMinorFeatures1_V2_COMPRESSION			0x00000002
+#define chipMinorFeatures1_VG_DOUBLE_BUFFER			0x00000004
+#define chipMinorFeatures1_EXTRA_EVENT_STATES			0x00000008
+#define chipMinorFeatures1_NO_STRIPING_NEEDED			0x00000010
+#define chipMinorFeatures1_TEXTURE_STRIDE			0x00000020
+#define chipMinorFeatures1_BUG_FIXES3				0x00000040
+#define chipMinorFeatures1_AUTO_DISABLE				0x00000080
+#define chipMinorFeatures1_AUTO_RESTART_TS			0x00000100
+#define chipMinorFeatures1_DISABLE_PE_GATING			0x00000200
+#define chipMinorFeatures1_L2_WINDOWING				0x00000400
+#define chipMinorFeatures1_HALF_FLOAT				0x00000800
+#define chipMinorFeatures1_PIXEL_DITHER				0x00001000
+#define chipMinorFeatures1_TWO_STENCIL_REFERENCE		0x00002000
+#define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT		0x00004000
+#define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH		0x00008000
+#define chipMinorFeatures1_2D_DITHER				0x00010000
+#define chipMinorFeatures1_BUG_FIXES5				0x00020000
+#define chipMinorFeatures1_NEW_2D				0x00040000
+#define chipMinorFeatures1_NEW_FP				0x00080000
+#define chipMinorFeatures1_TEXTURE_HALIGN			0x00100000
+#define chipMinorFeatures1_NON_POWER_OF_TWO			0x00200000
+#define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT		0x00400000
+#define chipMinorFeatures1_HALTI0				0x00800000
+#define chipMinorFeatures1_CORRECT_OVERFLOW_VG			0x01000000
+#define chipMinorFeatures1_NEGATIVE_LOG_FIX			0x02000000
+#define chipMinorFeatures1_RESOLVE_OFFSET			0x04000000
+#define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK			0x08000000
+#define chipMinorFeatures1_MMU_VERSION				0x10000000
+#define chipMinorFeatures1_WIDE_LINE				0x20000000
+#define chipMinorFeatures1_BUG_FIXES6				0x40000000
+#define chipMinorFeatures1_FC_FLUSH_STALL			0x80000000
+#define chipMinorFeatures2_LINE_LOOP				0x00000001
+#define chipMinorFeatures2_LOGIC_OP				0x00000002
+#define chipMinorFeatures2_UNK2					0x00000004
+#define chipMinorFeatures2_SUPERTILED_TEXTURE			0x00000008
+#define chipMinorFeatures2_UNK4					0x00000010
+#define chipMinorFeatures2_RECT_PRIMITIVE			0x00000020
+#define chipMinorFeatures2_COMPOSITION				0x00000040
+#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT		0x00000080
+#define chipMinorFeatures2_UNK8					0x00000100
+#define chipMinorFeatures2_UNK9					0x00000200
+#define chipMinorFeatures2_UNK10				0x00000400
+#define chipMinorFeatures2_SAMPLERBASE_16			0x00000800
+#define chipMinorFeatures2_UNK12				0x00001000
+#define chipMinorFeatures2_UNK13				0x00002000
+#define chipMinorFeatures2_UNK14				0x00004000
+#define chipMinorFeatures2_EXTRA_TEXTURE_STATE			0x00008000
+#define chipMinorFeatures2_FULL_DIRECTFB			0x00010000
+#define chipMinorFeatures2_2D_TILING				0x00020000
+#define chipMinorFeatures2_THREAD_WALKER_IN_PS			0x00040000
+#define chipMinorFeatures2_TILE_FILLER				0x00080000
+#define chipMinorFeatures2_UNK20				0x00100000
+#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT			0x00200000
+#define chipMinorFeatures2_UNK22				0x00400000
+#define chipMinorFeatures2_UNK23				0x00800000
+#define chipMinorFeatures2_UNK24				0x01000000
+#define chipMinorFeatures2_MIXED_STREAMS			0x02000000
+#define chipMinorFeatures2_2D_420_L2CACHE			0x04000000
+#define chipMinorFeatures2_UNK27				0x08000000
+#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH			0x10000000
+#define chipMinorFeatures2_TEXTURE_TILED_READ			0x20000000
+#define chipMinorFeatures2_UNK30				0x40000000
+#define chipMinorFeatures2_UNK31				0x80000000
+#define chipMinorFeatures3_ROTATION_STALL_FIX			0x00000001
+#define chipMinorFeatures3_UNK1					0x00000002
+#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX		0x00000004
+#define chipMinorFeatures3_UNK3					0x00000008
+#define chipMinorFeatures3_UNK4					0x00000010
+#define chipMinorFeatures3_UNK5					0x00000020
+#define chipMinorFeatures3_UNK6					0x00000040
+#define chipMinorFeatures3_UNK7					0x00000080
+#define chipMinorFeatures3_UNK8					0x00000100
+#define chipMinorFeatures3_UNK9					0x00000200
+#define chipMinorFeatures3_BUG_FIXES10				0x00000400
+#define chipMinorFeatures3_UNK11				0x00000800
+#define chipMinorFeatures3_BUG_FIXES11				0x00001000
+#define chipMinorFeatures3_UNK13				0x00002000
+#define chipMinorFeatures3_UNK14				0x00004000
+#define chipMinorFeatures3_UNK15				0x00008000
+#define chipMinorFeatures3_UNK16				0x00010000
+#define chipMinorFeatures3_UNK17				0x00020000
+#define chipMinorFeatures3_UNK18				0x00040000
+#define chipMinorFeatures3_UNK19				0x00080000
+#define chipMinorFeatures3_UNK20				0x00100000
+#define chipMinorFeatures3_UNK21				0x00200000
+#define chipMinorFeatures3_UNK22				0x00400000
+#define chipMinorFeatures3_UNK23				0x00800000
+#define chipMinorFeatures3_UNK24				0x01000000
+#define chipMinorFeatures3_UNK25				0x02000000
+#define chipMinorFeatures3_UNK26				0x04000000
+#define chipMinorFeatures3_UNK27				0x08000000
+#define chipMinorFeatures3_UNK28				0x10000000
+#define chipMinorFeatures3_UNK29				0x20000000
+#define chipMinorFeatures3_UNK30				0x40000000
+#define chipMinorFeatures3_UNK31				0x80000000
+#define chipMinorFeatures4_UNK0					0x00000001
+#define chipMinorFeatures4_UNK1					0x00000002
+#define chipMinorFeatures4_UNK2					0x00000004
+#define chipMinorFeatures4_UNK3					0x00000008
+#define chipMinorFeatures4_UNK4					0x00000010
+#define chipMinorFeatures4_UNK5					0x00000020
+#define chipMinorFeatures4_UNK6					0x00000040
+#define chipMinorFeatures4_UNK7					0x00000080
+#define chipMinorFeatures4_UNK8					0x00000100
+#define chipMinorFeatures4_UNK9					0x00000200
+#define chipMinorFeatures4_UNK10				0x00000400
+#define chipMinorFeatures4_UNK11				0x00000800
+#define chipMinorFeatures4_UNK12				0x00001000
+#define chipMinorFeatures4_UNK13				0x00002000
+#define chipMinorFeatures4_UNK14				0x00004000
+#define chipMinorFeatures4_UNK15				0x00008000
+#define chipMinorFeatures4_UNK16				0x00010000
+#define chipMinorFeatures4_UNK17				0x00020000
+#define chipMinorFeatures4_UNK18				0x00040000
+#define chipMinorFeatures4_UNK19				0x00080000
+#define chipMinorFeatures4_UNK20				0x00100000
+#define chipMinorFeatures4_UNK21				0x00200000
+#define chipMinorFeatures4_UNK22				0x00400000
+#define chipMinorFeatures4_UNK23				0x00800000
+#define chipMinorFeatures4_UNK24				0x01000000
+#define chipMinorFeatures4_UNK25				0x02000000
+#define chipMinorFeatures4_UNK26				0x04000000
+#define chipMinorFeatures4_UNK27				0x08000000
+#define chipMinorFeatures4_UNK28				0x10000000
+#define chipMinorFeatures4_UNK29				0x20000000
+#define chipMinorFeatures4_UNK30				0x40000000
+#define chipMinorFeatures4_UNK31				0x80000000
+
+#endif /* COMMON_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
new file mode 100644
index 0000000..332c55e
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2014 Etnaviv Project
+ * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+
+#include "common.xml.h"
+#include "state.xml.h"
+#include "cmdstream.xml.h"
+
+/*
+ * Command Buffer helper:
+ */
+
+static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
+{
+	u32 *vaddr = (u32 *)buffer->vaddr;
+
+	BUG_ON(buffer->user_size >= buffer->size);
+
+	vaddr[buffer->user_size / 4] = data;
+	buffer->user_size += 4;
+}
+
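+/*
+ * A LOAD_STATE command packs the opcode, the number of state words and
+ * the starting state offset into one header word, followed by the state
+ * values themselves.  Offsets are in 32-bit state words, hence the shift.
+ */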
+static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
+	u32 reg, u32 value)
+{
+	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
+
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	/* write a register via cmd stream */
+	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
+		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
+		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
+	OUT(buffer, value);
+}
+
+static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
+{
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	OUT(buffer, VIV_FE_END_HEADER_OP_END);
+}
+
+static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
+{
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
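+	/* the literal 200 fills the WAIT header's delay field (GPU clock cycles) */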
+	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
+}
+
+static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
+	u16 prefetch, u32 address)
+{
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
+		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
+	OUT(buffer, address);
+}
+
+static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
+	u32 from, u32 to)
+{
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
+	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
+}
+
+static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
+{
+	u32 flush;
+	u32 stall;
+
+	/*
+	 * This assumes that if we're switching to 2D, we're switching
+	 * away from 3D, and vice versa.  Hence, if we're switching to
+	 * the 2D core, we need to flush the 3D depth and color caches,
+	 * otherwise we need to flush the 2D pixel engine cache.
+	 */
+	if (pipe == ETNA_PIPE_2D)
+		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
+	else
+		flush = VIVS_GL_FLUSH_CACHE_PE2D;
+
+	stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
+		VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
+
+	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
+	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
+
+	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+
+	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
+}
+
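+/* The FE addresses memory relative to gpu->memory_base, so subtract it here. */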
+static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
+{
+	return buf->paddr - gpu->memory_base;
+}
+
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
+	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
+{
+	u32 size = buf->size;
+	u32 *ptr = buf->vaddr + off;
+
+	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
+			ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
+
+	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+			ptr, len * 4, 0);
+}
+
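+/*
+ * Prime the ring buffer with a WAIT/LINK pair that loops back on itself,
+ * leaving the FE spinning until etnaviv_buffer_queue() patches the WAIT.
+ * The return value is the prefetch count, in 64-bit words, used to start
+ * the FE on this buffer.
+ */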
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+	/* initialize buffer */
+	buffer->user_size = 0;
+
+	CMD_WAIT(buffer);
+	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
+
+	return buffer->user_size / 8;
+}
+
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+	/* Rewind over the final 16-byte WAIT/LINK pair and emit an END */
+	buffer->user_size -= 16;
+
+	CMD_END(buffer);
+	mb();
+}
+
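+/*
+ * Append a user command buffer to the ring: give the user buffer a
+ * return link, emit any MMU flush / pipe switch commands plus the
+ * completion event and a fresh WAIT/LINK into the ring, then patch the
+ * previous WAIT into a LINK so the spinning FE jumps to the new work.
+ */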
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
+	struct etnaviv_cmdbuf *cmdbuf)
+{
+	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+	u32 *lw = buffer->vaddr + buffer->user_size - 16;
+	u32 back, link_target, link_size, reserve_size, extra_size = 0;
+
+	if (drm_debug & DRM_UT_DRIVER)
+		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+
+	/*
+	 * If we need to flush the MMU prior to submitting this buffer, we
+	 * will need to append an MMU flush load state, followed by a new
+	 * link to this buffer - a total of four additional words.  A
+	 * pending pipe switch adds a further eight words for the cache
+	 * flush, semaphore stall and pipe select commands.
+	 */
+	if (gpu->mmu->need_flush || gpu->switch_context) {
+		/* link command */
+		extra_size += 2;
+		/* flush command */
+		if (gpu->mmu->need_flush)
+			extra_size += 2;
+		/* pipe switch commands */
+		if (gpu->switch_context)
+			extra_size += 8;
+	}
+
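+	/*
+	 * Reserve six words for the commands always emitted below: the
+	 * event LOAD_STATE (two words) and the new WAIT/LINK pair (two
+	 * words each), on top of the extra commands counted above.
+	 */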
+	reserve_size = (6 + extra_size) * 4;
+
+	/*
+	 * if we are going to completely overflow the buffer, we need to wrap.
+	 */
+	if (buffer->user_size + reserve_size > buffer->size)
+		buffer->user_size = 0;
+
+	/* save offset back into main buffer */
+	back = buffer->user_size + reserve_size - 6 * 4;
+	link_target = gpu_va(gpu, buffer) + buffer->user_size;
+	link_size = 6;
+
+	/* Skip over any extra instructions */
+	link_target += extra_size * sizeof(u32);
+
+	if (drm_debug & DRM_UT_DRIVER)
+		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
+			link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+
+	/* jump back from cmd to main buffer */
+	CMD_LINK(cmdbuf, link_size, link_target);
+
+	link_target = gpu_va(gpu, cmdbuf);
+	link_size = cmdbuf->size / 8;
+
+	if (drm_debug & DRM_UT_DRIVER) {
+		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+			       cmdbuf->vaddr, cmdbuf->size, 0);
+
+		pr_info("link op: %p\n", lw);
+		pr_info("link addr: %p\n", lw + 1);
+		pr_info("addr: 0x%08x\n", link_target);
+		pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
+		pr_info("event: %d\n", event);
+	}
+
+	if (gpu->mmu->need_flush || gpu->switch_context) {
+		u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
+
+		if (gpu->mmu->need_flush) {
+			/* Add the MMU flush */
+			CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
+				       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
+				       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
+				       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
+				       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
+				       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+
+			gpu->mmu->need_flush = false;
+		}
+
+		if (gpu->switch_context) {
+			etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
+			gpu->switch_context = false;
+		}
+
+		/* And the link to the first buffer */
+		CMD_LINK(buffer, link_size, link_target);
+
+		/* Update the link target to point to above instructions */
+		link_target = new_target;
+		link_size = extra_size;
+	}
+
+	/* trigger event */
+	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
+		       VIVS_GL_EVENT_FROM_PE);
+
+	/* append WAIT/LINK to main buffer */
+	CMD_WAIT(buffer);
+	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
+
+	/*
+	 * Change WAIT into a LINK command; write the address first, with a
+	 * barrier before the opcode, so the FE spinning on the WAIT never
+	 * sees the LINK opcode paired with a stale address.
+	 */
+	*(lw + 1) = link_target;
+	mb();
+	*(lw) = VIV_FE_LINK_HEADER_OP_LINK |
+		VIV_FE_LINK_HEADER_PREFETCH(link_size);
+	mb();
+
+	if (drm_debug & DRM_UT_DRIVER)
+		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
new file mode 100644
index 0000000..dcfd565
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+
+#include "cmdstream.xml.h"
+
+#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)
+
+struct etna_validation_state {
+	struct etnaviv_gpu *gpu;
+	const struct drm_etnaviv_gem_submit_reloc *relocs;
+	unsigned int num_relocs;
+	u32 *start;
+};
+
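+/*
+ * Ranges of state addresses that carry GPU addresses, and so may only be
+ * written through a matching relocation entry.  ST() takes a byte offset
+ * and word count, converting the offset to a 32-bit state-word index.
+ */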
+static const struct {
+	u16 offset;
+	u16 size;
+} etnaviv_sensitive_states[] __initconst = {
+#define ST(start, num) { (start) >> 2, (num) }
+	/* 2D */
+	ST(0x1200, 1),
+	ST(0x1228, 1),
+	ST(0x1238, 1),
+	ST(0x1284, 1),
+	ST(0x128c, 1),
+	ST(0x1304, 1),
+	ST(0x1310, 1),
+	ST(0x1318, 1),
+	ST(0x12800, 4),
+	ST(0x128a0, 4),
+	ST(0x128c0, 4),
+	ST(0x12970, 4),
+	ST(0x12a00, 8),
+	ST(0x12b40, 8),
+	ST(0x12b80, 8),
+	ST(0x12ce0, 8),
+	/* 3D */
+	ST(0x0644, 1),
+	ST(0x064c, 1),
+	ST(0x0680, 8),
+	ST(0x1410, 1),
+	ST(0x1430, 1),
+	ST(0x1458, 1),
+	ST(0x1460, 8),
+	ST(0x1480, 8),
+	ST(0x1500, 8),
+	ST(0x1520, 8),
+	ST(0x1608, 1),
+	ST(0x1610, 1),
+	ST(0x1658, 1),
+	ST(0x165c, 1),
+	ST(0x1664, 1),
+	ST(0x1668, 1),
+	ST(0x16a4, 1),
+	ST(0x16c0, 8),
+	ST(0x16e0, 8),
+	ST(0x1740, 8),
+	ST(0x2400, 14 * 16),
+	ST(0x10800, 32 * 16),
+#undef ST
+};
+
+#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
+static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);
+
+void __init etnaviv_validate_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++)
+		bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset,
+			   etnaviv_sensitive_states[i].size);
+}
+
+static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
+	unsigned int buf_offset, unsigned int state_addr)
+{
+	if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
+		dev_warn_once(state->gpu->dev,
+			      "%s: relocation for non-sensitive state 0x%x at offset %u\n",
+			      __func__, state_addr,
+			      state->relocs->submit_offset);
+		while (state->num_relocs &&
+		       state->relocs->submit_offset < buf_offset) {
+			state->relocs++;
+			state->num_relocs--;
+		}
+	}
+}
+
+static bool etnaviv_validate_load_state(struct etna_validation_state *state,
+	u32 *ptr, unsigned int state_offset, unsigned int num)
+{
+	unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
+	unsigned int st_offset = state_offset, buf_offset;
+
+	for_each_set_bit_from(st_offset, etnaviv_states, size) {
+		buf_offset = (ptr - state->start +
+			      st_offset - state_offset) * 4;
+
+		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4);
+		if (state->num_relocs &&
+		    state->relocs->submit_offset == buf_offset) {
+			state->relocs++;
+			state->num_relocs--;
+			continue;
+		}
+
+		dev_warn_ratelimited(state->gpu->dev,
+				     "%s: load state touches restricted state 0x%x at offset %u\n",
+				     __func__, st_offset * 4, buf_offset);
+		return false;
+	}
+
+	if (state->num_relocs) {
+		buf_offset = (ptr - state->start + num) * 4;
+		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
+					      state->relocs->submit_offset -
+					      buf_offset);
+	}
+
+	return true;
+}
+
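+/*
+ * Length of each FE opcode in 32-bit words; a zero entry marks an opcode
+ * that is not permitted in user command streams.
+ */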
+static uint8_t cmd_length[32] = {
+	[FE_OPCODE_DRAW_PRIMITIVES] = 4,
+	[FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
+	[FE_OPCODE_NOP] = 2,
+	[FE_OPCODE_STALL] = 2,
+};
+
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
+			      unsigned int size,
+			      struct drm_etnaviv_gem_submit_reloc *relocs,
+			      unsigned int reloc_size)
+{
+	struct etna_validation_state state;
+	u32 *buf = stream;
+	u32 *end = buf + size;
+
+	state.gpu = gpu;
+	state.relocs = relocs;
+	state.num_relocs = reloc_size;
+	state.start = stream;
+
+	while (buf < end) {
+		u32 cmd = *buf;
+		unsigned int len, n, off;
+		unsigned int op = cmd >> 27;
+
+		switch (op) {
+		case FE_OPCODE_LOAD_STATE:
+			n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
+			len = ALIGN(1 + n, 2);
+			if (buf + len > end)
+				break;
+
+			off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
+			if (!etnaviv_validate_load_state(&state, buf + 1,
+							 off, n))
+				return false;
+			break;
+
+		case FE_OPCODE_DRAW_2D:
+			n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
+			if (n == 0)
+				n = 256;
+			len = 2 + n * 2;
+			break;
+
+		default:
+			len = cmd_length[op];
+			if (len == 0) {
+				dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
+					__func__, op, buf - state.start);
+				return false;
+			}
+			break;
+		}
+
+		buf += len;
+	}
+
+	if (buf > end) {
+		dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
+			__func__, buf - state.start, size);
+		return false;
+	}
+
+	return true;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
new file mode 100644
index 0000000..5c89ebb
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/component.h>
+#include <linux/of_platform.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_gem.h"
+
+#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
+static bool reglog;
+MODULE_PARM_DESC(reglog, "Enable register read/write logging");
+module_param(reglog, bool, 0600);
+#else
+#define reglog 0
+#endif
+
+void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
+		const char *dbgname)
+{
+	struct resource *res;
+	void __iomem *ptr;
+
+	if (name)
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	else
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	ptr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ptr)) {
+		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
+			PTR_ERR(ptr));
+		return ptr;
+	}
+
+	if (reglog)
+		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
+			   dbgname, ptr, (size_t)resource_size(res));
+
+	return ptr;
+}
+
+void etnaviv_writel(u32 data, void __iomem *addr)
+{
+	if (reglog)
+		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+
+	writel(data, addr);
+}
+
+u32 etnaviv_readl(const void __iomem *addr)
+{
+	u32 val = readl(addr);
+
+	if (reglog)
+		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
+
+	return val;
+}
+
+/*
+ * DRM operations:
+ */
+
+static void load_gpu(struct drm_device *dev)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < ETNA_MAX_PIPES; i++) {
+		struct etnaviv_gpu *g = priv->gpu[i];
+
+		if (g) {
+			int ret;
+
+			ret = etnaviv_gpu_init(g);
+			if (ret) {
+				dev_err(g->dev, "hw init failed: %d\n", ret);
+				priv->gpu[i] = NULL;
+			}
+		}
+	}
+}
+
+static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct etnaviv_file_private *ctx;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	file->driver_priv = ctx;
+
+	return 0;
+}
+
+static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	struct etnaviv_file_private *ctx = file->driver_priv;
+	unsigned int i;
+
+	for (i = 0; i < ETNA_MAX_PIPES; i++) {
+		struct etnaviv_gpu *gpu = priv->gpu[i];
+
+		if (gpu) {
+			mutex_lock(&gpu->lock);
+			if (gpu->lastctx == ctx)
+				gpu->lastctx = NULL;
+			mutex_unlock(&gpu->lock);
+		}
+	}
+
+	kfree(ctx);
+}
+
+/*
+ * DRM debugfs:
+ */
+
+#ifdef CONFIG_DEBUG_FS
+static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+
+	etnaviv_gem_describe_objects(priv, m);
+
+	return 0;
+}
+
+static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
+{
+	int ret;
+
+	read_lock(&dev->vma_offset_manager->vm_lock);
+	ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+	read_unlock(&dev->vma_offset_manager->vm_lock);
+
+	return ret;
+}
+
+static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
+
+	mutex_lock(&gpu->mmu->lock);
+	drm_mm_dump_table(m, &gpu->mmu->mm);
+	mutex_unlock(&gpu->mmu->lock);
+
+	return 0;
+}
+
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+	struct etnaviv_cmdbuf *buf = gpu->buffer;
+	u32 size = buf->size;
+	u32 *ptr = buf->vaddr;
+	u32 i;
+
+	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
+			buf->vaddr, (u64)buf->paddr, size - buf->user_size);
+
+	for (i = 0; i < size / 4; i++) {
+		if (i && !(i % 4))
+			seq_puts(m, "\n");
+		if (i % 4 == 0)
+			seq_printf(m, "\t0x%p: ", ptr + i);
+		seq_printf(m, "%08x ", *(ptr + i));
+	}
+	seq_puts(m, "\n");
+}
+
+static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
+
+	mutex_lock(&gpu->lock);
+	etnaviv_buffer_dump(gpu, m);
+	mutex_unlock(&gpu->lock);
+
+	return 0;
+}
+
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	int (*show)(struct drm_device *dev, struct seq_file *m) =
+			node->info_ent->data;
+
+	return show(dev, m);
+}
+
+static int show_each_gpu(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	struct etnaviv_gpu *gpu;
+	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
+			node->info_ent->data;
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < ETNA_MAX_PIPES; i++) {
+		gpu = priv->gpu[i];
+		if (!gpu)
+			continue;
+
+		ret = show(gpu, m);
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
+
+static struct drm_info_list etnaviv_debugfs_list[] = {
+		{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
+		{"gem", show_unlocked, 0, etnaviv_gem_show},
+		{"mm", show_unlocked, 0, etnaviv_mm_show},
+		{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
+		{"ring", show_each_gpu, 0, etnaviv_ring_show},
+};
+
+static int etnaviv_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	ret = drm_debugfs_create_files(etnaviv_debugfs_list,
+			ARRAY_SIZE(etnaviv_debugfs_list),
+			minor->debugfs_root, minor);
+	if (ret) {
+		dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(etnaviv_debugfs_list,
+			ARRAY_SIZE(etnaviv_debugfs_list), minor);
+}
+#endif
+
+/*
+ * DRM ioctls:
+ */
+
+static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	struct drm_etnaviv_param *args = data;
+	struct etnaviv_gpu *gpu;
+
+	if (args->pipe >= ETNA_MAX_PIPES)
+		return -EINVAL;
+
+	gpu = priv->gpu[args->pipe];
+	if (!gpu)
+		return -ENXIO;
+
+	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
+}
+
+static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_etnaviv_gem_new *args = data;
+
+	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
+			    ETNA_BO_FORCE_MMU))
+		return -EINVAL;
+
+	return etnaviv_gem_new_handle(dev, file, args->size,
+			args->flags, &args->handle);
+}
+
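+/* Repack a userspace-supplied timeout into a kernel struct timespec. */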
+#define TS(t) ((struct timespec){ \
+	.tv_sec = (t).tv_sec, \
+	.tv_nsec = (t).tv_nsec \
+})
+
+static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_etnaviv_gem_cpu_prep *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(dev, file, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_etnaviv_gem_cpu_fini *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (args->flags)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(dev, file, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	ret = etnaviv_gem_cpu_fini(obj);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_etnaviv_gem_info *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (args->pad)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(dev, file, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_etnaviv_wait_fence *args = data;
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	struct timespec *timeout = &TS(args->timeout);
+	struct etnaviv_gpu *gpu;
+
+	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
+		return -EINVAL;
+
+	if (args->pipe >= ETNA_MAX_PIPES)
+		return -EINVAL;
+
+	gpu = priv->gpu[args->pipe];
+	if (!gpu)
+		return -ENXIO;
+
+	if (args->flags & ETNA_WAIT_NONBLOCK)
+		timeout = NULL;
+
+	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
+						    timeout);
+}
+
+static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_etnaviv_gem_userptr *args = data;
+	int access;
+
+	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
+	    args->flags == 0)
+		return -EINVAL;
+
+	if (offset_in_page(args->user_ptr | args->user_size) ||
+	    (uintptr_t)args->user_ptr != args->user_ptr ||
+	    (u32)args->user_size != args->user_size ||
+	    args->user_ptr & ~PAGE_MASK)
+		return -EINVAL;
+
+	if (args->flags & ETNA_USERPTR_WRITE)
+		access = VERIFY_WRITE;
+	else
+		access = VERIFY_READ;
+
+	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
+		       args->user_size))
+		return -EFAULT;
+
+	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
+				       args->user_size, args->flags,
+				       &args->handle);
+}
+
+static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	struct drm_etnaviv_gem_wait *args = data;
+	struct timespec *timeout = &TS(args->timeout);
+	struct drm_gem_object *obj;
+	struct etnaviv_gpu *gpu;
+	int ret;
+
+	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
+		return -EINVAL;
+
+	if (args->pipe >= ETNA_MAX_PIPES)
+		return -EINVAL;
+
+	gpu = priv->gpu[args->pipe];
+	if (!gpu)
+		return -ENXIO;
+
+	obj = drm_gem_object_lookup(dev, file, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	if (args->flags & ETNA_WAIT_NONBLOCK)
+		timeout = NULL;
+
+	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+static const struct drm_ioctl_desc etnaviv_ioctls[] = {
+#define ETNA_IOCTL(n, func, flags) \
+	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
+	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
+	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
+	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
+	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
+	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
+	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
+	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
+	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
+	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
+};
+
+static const struct vm_operations_struct vm_ops = {
+	.fault = etnaviv_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct file_operations fops = {
+	.owner              = THIS_MODULE,
+	.open               = drm_open,
+	.release            = drm_release,
+	.unlocked_ioctl     = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl       = drm_compat_ioctl,
+#endif
+	.poll               = drm_poll,
+	.read               = drm_read,
+	.llseek             = no_llseek,
+	.mmap               = etnaviv_gem_mmap,
+};
+
+static struct drm_driver etnaviv_drm_driver = {
+	.driver_features    = DRIVER_HAVE_IRQ |
+				DRIVER_GEM |
+				DRIVER_PRIME |
+				DRIVER_RENDER,
+	.open               = etnaviv_open,
+	.preclose           = etnaviv_preclose,
+	.set_busid          = drm_platform_set_busid,
+	.gem_free_object    = etnaviv_gem_free_object,
+	.gem_vm_ops         = &vm_ops,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export   = drm_gem_prime_export,
+	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_pin      = etnaviv_gem_prime_pin,
+	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
+	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
+	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
+	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init       = etnaviv_debugfs_init,
+	.debugfs_cleanup    = etnaviv_debugfs_cleanup,
+#endif
+	.ioctls             = etnaviv_ioctls,
+	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
+	.fops               = &fops,
+	.name               = "etnaviv",
+	.desc               = "etnaviv DRM",
+	.date               = "20151214",
+	.major              = 1,
+	.minor              = 0,
+};
+
+/*
+ * Platform driver:
+ */
+static int etnaviv_bind(struct device *dev)
+{
+	struct etnaviv_drm_private *priv;
+	struct drm_device *drm;
+	int ret;
+
+	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
+	if (!drm)
+		return -ENOMEM;
+
+	drm->platformdev = to_platform_device(dev);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(dev, "failed to allocate private data\n");
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+	drm->dev_private = priv;
+
+	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
+	if (!priv->wq) {
+		ret = -ENOMEM;
+		goto out_wq;
+	}
+
+	mutex_init(&priv->gem_lock);
+	INIT_LIST_HEAD(&priv->gem_list);
+	priv->num_gpus = 0;
+
+	dev_set_drvdata(dev, drm);
+
+	ret = component_bind_all(dev, drm);
+	if (ret < 0)
+		goto out_bind;
+
+	load_gpu(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto out_register;
+
+	return 0;
+
+out_register:
+	component_unbind_all(dev, drm);
+out_bind:
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
+out_wq:
+	kfree(priv);
+out_unref:
+	drm_dev_unref(drm);
+
+	return ret;
+}
+
+static void etnaviv_unbind(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct etnaviv_drm_private *priv = drm->dev_private;
+
+	drm_dev_unregister(drm);
+
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
+
+	component_unbind_all(dev, drm);
+
+	drm->dev_private = NULL;
+	kfree(priv);
+
+	drm_put_dev(drm);
+}
+
+static const struct component_master_ops etnaviv_master_ops = {
+	.bind = etnaviv_bind,
+	.unbind = etnaviv_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+	struct device_node *np = data;
+
+	return dev->of_node == np;
+}
+
+static int compare_str(struct device *dev, void *data)
+{
+	return !strcmp(dev_name(dev), data);
+}
+
+static int etnaviv_pdev_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct component_match *match = NULL;
+
+	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
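+	/*
+	 * Collect the GPU core devices to bind against: via the "cores"
+	 * phandle list when probed from DT, otherwise via a NULL-terminated
+	 * array of device names in the platform data.
+	 */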
+	if (node) {
+		struct device_node *core_node;
+		int i;
+
+		for (i = 0; ; i++) {
+			core_node = of_parse_phandle(node, "cores", i);
+			if (!core_node)
+				break;
+
+			component_match_add(&pdev->dev, &match, compare_of,
+					    core_node);
+			of_node_put(core_node);
+		}
+	} else if (dev->platform_data) {
+		char **names = dev->platform_data;
+		unsigned i;
+
+		for (i = 0; names[i]; i++)
+			component_match_add(dev, &match, compare_str, names[i]);
+	}
+
+	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
+}
+
+static int etnaviv_pdev_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &etnaviv_master_ops);
+
+	return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "fsl,imx-gpu-subsystem" },
+	{ .compatible = "marvell,dove-gpu-subsystem" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver etnaviv_platform_driver = {
+	.probe      = etnaviv_pdev_probe,
+	.remove     = etnaviv_pdev_remove,
+	.driver     = {
+		.owner  = THIS_MODULE,
+		.name   = "etnaviv",
+		.of_match_table = dt_match,
+	},
+};
+
+static int __init etnaviv_init(void)
+{
+	int ret;
+
+	etnaviv_validate_init();
+
+	ret = platform_driver_register(&etnaviv_gpu_driver);
+	if (ret != 0)
+		return ret;
+
+	ret = platform_driver_register(&etnaviv_platform_driver);
+	if (ret != 0)
+		platform_driver_unregister(&etnaviv_gpu_driver);
+
+	return ret;
+}
+module_init(etnaviv_init);
+
+static void __exit etnaviv_exit(void)
+{
+	platform_driver_unregister(&etnaviv_gpu_driver);
+	platform_driver_unregister(&etnaviv_platform_driver);
+}
+module_exit(etnaviv_exit);
+
+MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
+MODULE_DESCRIPTION("etnaviv DRM Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:etnaviv");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
new file mode 100644
index 0000000..d6bd438
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_DRV_H__
+#define __ETNAVIV_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/etnaviv_drm.h>
+
+struct etnaviv_cmdbuf;
+struct etnaviv_gpu;
+struct etnaviv_mmu;
+struct etnaviv_gem_object;
+struct etnaviv_gem_submit;
+
+struct etnaviv_file_private {
+	/* currently we don't do anything useful with this, but when
+	 * per-context address spaces are supported we'd keep track of
+	 * the context's page-tables here.
+	 */
+	int dummy;
+};
+
+struct etnaviv_drm_private {
+	int num_gpus;
+	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+
+	/* list of GEM objects: */
+	struct mutex gem_lock;
+	struct list_head gem_list;
+
+	struct workqueue_struct *wq;
+};
+
+static inline void etnaviv_queue_work(struct drm_device *dev,
+	struct work_struct *w)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+
+	queue_work(priv->wq, w);
+}
+
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+		struct drm_file *file);
+
+int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
+int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
+	struct drm_gem_object *obj, u32 *iova);
+void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj);
+struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
+void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
+void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
+	struct dma_buf_attachment *attach, struct sg_table *sg);
+int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
+void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
+void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
+		struct timespec *timeout);
+int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
+void etnaviv_gem_free_object(struct drm_gem_object *obj);
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+		u32 size, u32 flags, u32 *handle);
+struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
+		u32 size, u32 flags);
+struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
+		u32 size, u32 flags);
+int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
+	uintptr_t ptr, u32 size, u32 flags, u32 *handle);
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
+	struct etnaviv_cmdbuf *cmdbuf);
+void etnaviv_validate_init(void);
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
+	u32 *stream, unsigned int size,
+	struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);
+
+#ifdef CONFIG_DEBUG_FS
+void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
+	struct seq_file *m);
+#endif
+
+void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
+		const char *dbgname);
+void etnaviv_writel(u32 data, void __iomem *addr);
+u32 etnaviv_readl(const void __iomem *addr);
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__); } while (0)
+
+/*
+ * Return the storage size of a structure with a variable length array.
+ * The array is nelem elements of elem_size, where the base structure
+ * is defined by base.  If the size overflows size_t, return zero.
+ */
+static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
+{
+	if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
+		return 0;
+	return base + nelem * elem_size;
+}
+
+/* returns true if fence a comes after fence b */
+static inline bool fence_after(u32 a, u32 b)
+{
+	return (s32)(a - b) > 0;
+}
+
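+/* returns true if fence a is the same as, or comes after, fence b */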
+static inline bool fence_after_eq(u32 a, u32 b)
+{
+	return (s32)(a - b) >= 0;
+}
+
+static inline unsigned long etnaviv_timeout_to_jiffies(
+	const struct timespec *timeout)
+{
+	unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+	unsigned long start_jiffies = jiffies;
+	unsigned long remaining_jiffies;
+
+	if (time_after(start_jiffies, timeout_jiffies))
+		remaining_jiffies = 0;
+	else
+		remaining_jiffies = timeout_jiffies - start_jiffies;
+
+	return remaining_jiffies;
+}
+
+#endif /* __ETNAVIV_DRV_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
new file mode 100644
index 0000000..bf8fa85
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/devcoredump.h>
+#include "etnaviv_dump.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "state.xml.h"
+#include "state_hi.xml.h"
+
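+/*
+ * The dump file is laid out as an array of object headers followed by
+ * the payloads they describe; hdr walks the header array while data
+ * walks the payload area in lock step.
+ */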
+struct core_dump_iterator {
+	void *start;
+	struct etnaviv_dump_object_header *hdr;
+	void *data;
+};
+
+static const unsigned short etnaviv_dump_registers[] = {
+	VIVS_HI_AXI_STATUS,
+	VIVS_HI_CLOCK_CONTROL,
+	VIVS_HI_IDLE_STATE,
+	VIVS_HI_AXI_CONFIG,
+	VIVS_HI_INTR_ENBL,
+	VIVS_HI_CHIP_IDENTITY,
+	VIVS_HI_CHIP_FEATURE,
+	VIVS_HI_CHIP_MODEL,
+	VIVS_HI_CHIP_REV,
+	VIVS_HI_CHIP_DATE,
+	VIVS_HI_CHIP_TIME,
+	VIVS_HI_CHIP_MINOR_FEATURE_0,
+	VIVS_HI_CACHE_CONTROL,
+	VIVS_HI_AXI_CONTROL,
+	VIVS_PM_POWER_CONTROLS,
+	VIVS_PM_MODULE_CONTROLS,
+	VIVS_PM_MODULE_STATUS,
+	VIVS_PM_PULSE_EATER,
+	VIVS_MC_MMU_FE_PAGE_TABLE,
+	VIVS_MC_MMU_TX_PAGE_TABLE,
+	VIVS_MC_MMU_PE_PAGE_TABLE,
+	VIVS_MC_MMU_PEZ_PAGE_TABLE,
+	VIVS_MC_MMU_RA_PAGE_TABLE,
+	VIVS_MC_DEBUG_MEMORY,
+	VIVS_MC_MEMORY_BASE_ADDR_RA,
+	VIVS_MC_MEMORY_BASE_ADDR_FE,
+	VIVS_MC_MEMORY_BASE_ADDR_TX,
+	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
+	VIVS_MC_MEMORY_BASE_ADDR_PE,
+	VIVS_MC_MEMORY_TIMING_CONTROL,
+	VIVS_MC_BUS_CONFIG,
+	VIVS_FE_DMA_STATUS,
+	VIVS_FE_DMA_DEBUG_STATE,
+	VIVS_FE_DMA_ADDRESS,
+	VIVS_FE_DMA_LOW,
+	VIVS_FE_DMA_HIGH,
+	VIVS_FE_AUTO_FLUSH,
+};
+
+static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
+	u32 type, void *data_end)
+{
+	struct etnaviv_dump_object_header *hdr = iter->hdr;
+
+	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
+	hdr->type = cpu_to_le32(type);
+	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
+	hdr->file_size = cpu_to_le32(data_end - iter->data);
+
+	iter->hdr++;
+	iter->data += hdr->file_size;
+}
+
+static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
+	struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_dump_registers *reg = iter->data;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
+		reg->reg = etnaviv_dump_registers[i];
+		reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
+	}
+
+	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
+}
+
+static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
+	struct etnaviv_gpu *gpu, size_t mmu_size)
+{
+	etnaviv_iommu_dump(gpu->mmu, iter->data);
+
+	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
+}
+
+static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
+	void *ptr, size_t size, u64 iova)
+{
+	memcpy(iter->data, ptr, size);
+
+	iter->hdr->iova = cpu_to_le64(iova);
+
+	etnaviv_core_dump_header(iter, type, iter->data + size);
+}
+
+void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+{
+	struct core_dump_iterator iter;
+	struct etnaviv_vram_mapping *vram;
+	struct etnaviv_gem_object *obj;
+	struct etnaviv_cmdbuf *cmd;
+	unsigned int n_obj, n_bomap_pages;
+	size_t file_size, mmu_size;
+	__le64 *bomap, *bomap_start;
+
+	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
+
+	/* We always dump registers, mmu, ring and end marker */
+	n_obj = 4;
+	n_bomap_pages = 0;
+	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
+			sizeof(struct etnaviv_dump_registers) +
+		    mmu_size + gpu->buffer->size;
+
+	/* Add in the active command buffers */
+	list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
+		file_size += cmd->size;
+		n_obj++;
+	}
+
+	/* Add in the active buffer objects */
+	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
+		if (!vram->use)
+			continue;
+
+		obj = vram->object;
+		file_size += obj->base.size;
+		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
+		n_obj++;
+	}
+
+	/* If we have any buffer objects, add a bomap object */
+	if (n_bomap_pages) {
+		file_size += n_bomap_pages * sizeof(__le64);
+		n_obj++;
+	}
+
+	/* Add the size of the headers */
+	file_size += sizeof(*iter.hdr) * n_obj;
+
+	/* Allocate the file in vmalloc memory, it's likely to be big */
+	iter.start = vmalloc(file_size);
+	if (!iter.start) {
+		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
+		return;
+	}
+
+	/* Point the data member after the headers */
+	iter.hdr = iter.start;
+	iter.data = &iter.hdr[n_obj];
+
+	memset(iter.hdr, 0, iter.data - iter.start);
+
+	etnaviv_core_dump_registers(&iter, gpu);
+	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
+	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
+			      gpu->buffer->size, gpu->buffer->paddr);
+
+	list_for_each_entry(cmd, &gpu->active_cmd_list, node)
+		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
+				      cmd->size, cmd->paddr);
+
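+	/*
+	 * The bomap is a flat array of little-endian physical page
+	 * addresses; each buffer object header records the index of its
+	 * first page in data[0].
+	 */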
+	/* Reserve space for the bomap */
+	if (n_bomap_pages) {
+		bomap_start = bomap = iter.data;
+		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
+		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
+					 bomap + n_bomap_pages);
+	} else {
+		/* Silence warning */
+		bomap_start = bomap = NULL;
+	}
+
+	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
+		struct page **pages;
+		void *vaddr;
+
+		if (vram->use == 0)
+			continue;
+
+		obj = vram->object;
+
+		pages = etnaviv_gem_get_pages(obj);
+		if (!IS_ERR(pages)) {
+			int j;
+
+			iter.hdr->data[0] = cpu_to_le32(bomap - bomap_start);
+
+			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
+				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
+		}
+
+		iter.hdr->iova = cpu_to_le64(vram->iova);
+
+		vaddr = etnaviv_gem_vaddr(&obj->base);
+		if (vaddr && !IS_ERR(vaddr))
+			memcpy(iter.data, vaddr, obj->base.size);
+
+		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
+					 obj->base.size);
+	}
+
+	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
+
+	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
new file mode 100644
index 0000000..97f2f8d
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Etnaviv devcoredump file definitions
+ */
+#ifndef ETNAVIV_DUMP_H
+#define ETNAVIV_DUMP_H
+
+#include <linux/types.h>
+
+enum {
+	ETDUMP_MAGIC = 0x414e5445,
+	ETDUMP_BUF_REG = 0,
+	ETDUMP_BUF_MMU,
+	ETDUMP_BUF_RING,
+	ETDUMP_BUF_CMD,
+	ETDUMP_BUF_BOMAP,
+	ETDUMP_BUF_BO,
+	ETDUMP_BUF_END,
+};
+
+struct etnaviv_dump_object_header {
+	__le32 magic;
+	__le32 type;
+	__le32 file_offset;
+	__le32 file_size;
+	__le64 iova;
+	__le32 data[2];
+};
+
+/* Registers object, an array of these */
+struct etnaviv_dump_registers {
+	__le32 reg;
+	__le32 value;
+};
+
+#ifdef __KERNEL__
+struct etnaviv_gpu;
+void etnaviv_core_dump(struct etnaviv_gpu *gpu);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
new file mode 100644
index 0000000..8d6f859
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
+{
+	struct drm_device *dev = etnaviv_obj->base.dev;
+	struct sg_table *sgt = etnaviv_obj->sgt;
+
+	/*
+	 * For non-cached buffers, ensure the new pages are clean
+	 * because display controller, GPU, etc. are not coherent.
+	 */
+	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
+		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+}
+
+static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
+{
+	struct drm_device *dev = etnaviv_obj->base.dev;
+	struct sg_table *sgt = etnaviv_obj->sgt;
+
+	/*
+	 * For non-cached buffers, ensure the new pages are clean
+	 * because display controller, GPU, etc. are not coherent:
+	 *
+	 * WARNING: The DMA API does not support concurrent CPU
+	 * and device access to the memory area.  With BIDIRECTIONAL,
+	 * we will clean the cache lines which overlap the region,
+	 * and invalidate all cache lines (partially) contained in
+	 * the region.
+	 *
+	 * If you have dirty data in the overlapping cache lines,
+	 * that will corrupt the GPU-written data.  If you have
+	 * written into the remainder of the region, this can
+	 * discard those writes.
+	 */
+	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
+		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+}
+
+/* called with etnaviv_obj->lock held */
+static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+	struct drm_device *dev = etnaviv_obj->base.dev;
+	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
+
+	if (IS_ERR(p)) {
+		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
+		return PTR_ERR(p);
+	}
+
+	etnaviv_obj->pages = p;
+
+	return 0;
+}
+
+static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+	if (etnaviv_obj->sgt) {
+		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
+		sg_free_table(etnaviv_obj->sgt);
+		kfree(etnaviv_obj->sgt);
+		etnaviv_obj->sgt = NULL;
+	}
+	if (etnaviv_obj->pages) {
+		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
+				  true, false);
+
+		etnaviv_obj->pages = NULL;
+	}
+}
+
+struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+	int ret;
+
+	lockdep_assert_held(&etnaviv_obj->lock);
+
+	if (!etnaviv_obj->pages) {
+		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
+		if (ret < 0)
+			return ERR_PTR(ret);
+	}
+
+	if (!etnaviv_obj->sgt) {
+		struct drm_device *dev = etnaviv_obj->base.dev;
+		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+		struct sg_table *sgt;
+
+		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+		if (IS_ERR(sgt)) {
+			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
+				PTR_ERR(sgt));
+			return ERR_CAST(sgt);
+		}
+
+		etnaviv_obj->sgt = sgt;
+
+		etnaviv_gem_scatter_map(etnaviv_obj);
+	}
+
+	return etnaviv_obj->pages;
+}
+
+void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+	lockdep_assert_held(&etnaviv_obj->lock);
+	/* when we start tracking the pin count, then do something here */
+}
+
+static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
+		struct vm_area_struct *vma)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	pgprot_t vm_page_prot;
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+	if (etnaviv_obj->flags & ETNA_BO_WC) {
+		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
+	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
+		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
+	} else {
+		/*
+		 * Shunt off cached objs to shmem file so they have their own
+		 * address_space (so unmap_mapping_range does what we want,
+		 * in particular in the case of mmap'd dmabufs)
+		 */
+		fput(vma->vm_file);
+		get_file(obj->filp);
+		vma->vm_pgoff = 0;
+		vma->vm_file  = obj->filp;
+
+		vma->vm_page_prot = vm_page_prot;
+	}
+
+	return 0;
+}
+
+int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret) {
+		DBG("mmap failed: %d", ret);
+		return ret;
+	}
+
+	return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	struct page **pages, *page;
+	pgoff_t pgoff;
+	int ret;
+
+	/*
+	 * Make sure we don't parallel update on a fault, nor move or remove
+	 * something from beneath our feet.  Note that vm_insert_page() is
+	 * specifically coded to take care of this, so we don't have to.
+	 */
+	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
+	if (ret)
+		goto out;
+
+	/* make sure we have pages attached now */
+	pages = etnaviv_gem_get_pages(etnaviv_obj);
+	mutex_unlock(&etnaviv_obj->lock);
+
+	if (IS_ERR(pages)) {
+		ret = PTR_ERR(pages);
+		goto out;
+	}
+
+	/* We don't use vmf->pgoff since that has the fake offset: */
+	pgoff = ((unsigned long)vmf->virtual_address -
+			vma->vm_start) >> PAGE_SHIFT;
+
+	page = pages[pgoff];
+
+	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
+
+	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+
+out:
+	switch (ret) {
+	case -EAGAIN:
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
+{
+	int ret;
+
+	/* Make it mmapable */
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
+	else
+		*offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+	return ret;
+}
+
+static struct etnaviv_vram_mapping *
+etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
+			     struct etnaviv_iommu *mmu)
+{
+	struct etnaviv_vram_mapping *mapping;
+
+	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
+		if (mapping->mmu == mmu)
+			return mapping;
+	}
+
+	return NULL;
+}
+
+int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
+	struct drm_gem_object *obj, u32 *iova)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	struct etnaviv_vram_mapping *mapping;
+	struct page **pages;
+	int ret = 0;
+
+	mutex_lock(&etnaviv_obj->lock);
+	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+	if (mapping) {
+		/*
+		 * Holding the object lock prevents the use count changing
+		 * beneath us.  If the use count is zero, the MMU might be
+		 * reaping this object, so take the lock and re-check that
+		 * the MMU owns this mapping to close this race.
+		 */
+		if (mapping->use == 0) {
+			mutex_lock(&gpu->mmu->lock);
+			if (mapping->mmu == gpu->mmu)
+				mapping->use += 1;
+			else
+				mapping = NULL;
+			mutex_unlock(&gpu->mmu->lock);
+			if (mapping)
+				goto out;
+		} else {
+			mapping->use += 1;
+			goto out;
+		}
+	}
+
+	pages = etnaviv_gem_get_pages(etnaviv_obj);
+	if (IS_ERR(pages)) {
+		ret = PTR_ERR(pages);
+		goto out;
+	}
+
+	/*
+	 * See if we have a reaped vram mapping we can re-use before
+	 * allocating a fresh mapping.
+	 */
+	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
+	if (!mapping) {
+		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+		if (!mapping) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		INIT_LIST_HEAD(&mapping->scan_node);
+		mapping->object = etnaviv_obj;
+	} else {
+		list_del(&mapping->obj_node);
+	}
+
+	mapping->mmu = gpu->mmu;
+	mapping->use = 1;
+
+	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
+				    mapping);
+	if (ret < 0)
+		kfree(mapping);
+	else
+		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
+
+out:
+	mutex_unlock(&etnaviv_obj->lock);
+
+	if (!ret) {
+		/* Take a reference on the object */
+		drm_gem_object_reference(obj);
+		*iova = mapping->iova;
+	}
+
+	return ret;
+}
+
+void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	struct etnaviv_vram_mapping *mapping;
+
+	mutex_lock(&etnaviv_obj->lock);
+	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+
+	WARN_ON(mapping->use == 0);
+	mapping->use -= 1;
+	mutex_unlock(&etnaviv_obj->lock);
+
+	drm_gem_object_unreference_unlocked(obj);
+}
+
+void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+	mutex_lock(&etnaviv_obj->lock);
+	if (!etnaviv_obj->vaddr) {
+		struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
+
+		if (IS_ERR(pages)) {
+			mutex_unlock(&etnaviv_obj->lock);
+			return ERR_CAST(pages);
+		}
+
+		etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+	}
+	mutex_unlock(&etnaviv_obj->lock);
+
+	return etnaviv_obj->vaddr;
+}
+
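+/* map ETNA_PREP_* flags onto the DMA direction used for cache maintenance */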
+static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
+{
+	if (op & ETNA_PREP_READ)
+		return DMA_FROM_DEVICE;
+	else if (op & ETNA_PREP_WRITE)
+		return DMA_TO_DEVICE;
+	else
+		return DMA_BIDIRECTIONAL;
+}
+
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
+		struct timespec *timeout)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	struct drm_device *dev = obj->dev;
+	bool write = !!(op & ETNA_PREP_WRITE);
+	int ret;
+
+	if (op & ETNA_PREP_NOSYNC) {
+		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
+							  write))
+			return -EBUSY;
+	} else {
+		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+
+		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+							  write, true, remain);
+		if (ret <= 0)
+			return ret == 0 ? -ETIMEDOUT : ret;
+	}
+
+	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
+		if (!etnaviv_obj->sgt) {
+			void *ret;
+
+			mutex_lock(&etnaviv_obj->lock);
+			ret = etnaviv_gem_get_pages(etnaviv_obj);
+			mutex_unlock(&etnaviv_obj->lock);
+			if (IS_ERR(ret))
+				return PTR_ERR(ret);
+		}
+
+		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
+				    etnaviv_obj->sgt->nents,
+				    etnaviv_op_to_dma_dir(op));
+		etnaviv_obj->last_cpu_prep_op = op;
+	}
+
+	return 0;
+}
+
+int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
+		/* fini without a prep is almost certainly a userspace error */
+		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
+		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
+			etnaviv_obj->sgt->nents,
+			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
+		etnaviv_obj->last_cpu_prep_op = 0;
+	}
+
+	return 0;
+}
+
+int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
+	struct timespec *timeout)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void etnaviv_gem_describe_fence(struct fence *fence,
+	const char *type, struct seq_file *m)
+{
+	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		seq_printf(m, "\t%9s: %s %s seq %u\n",
+			   type,
+			   fence->ops->get_driver_name(fence),
+			   fence->ops->get_timeline_name(fence),
+			   fence->seqno);
+}
+
+static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	struct reservation_object *robj = etnaviv_obj->resv;
+	struct reservation_object_list *fobj;
+	struct fence *fence;
+	unsigned long off = drm_vma_node_start(&obj->vma_node);
+
+	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
+			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
+			obj->name, obj->refcount.refcount.counter,
+			off, etnaviv_obj->vaddr, obj->size);
+
+	rcu_read_lock();
+	fobj = rcu_dereference(robj->fence);
+	if (fobj) {
+		unsigned int i, shared_count = fobj->shared_count;
+
+		for (i = 0; i < shared_count; i++) {
+			fence = rcu_dereference(fobj->shared[i]);
+			etnaviv_gem_describe_fence(fence, "Shared", m);
+		}
+	}
+
+	fence = rcu_dereference(robj->fence_excl);
+	if (fence)
+		etnaviv_gem_describe_fence(fence, "Exclusive", m);
+	rcu_read_unlock();
+}
+
+void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
+	struct seq_file *m)
+{
+	struct etnaviv_gem_object *etnaviv_obj;
+	int count = 0;
+	size_t size = 0;
+
+	mutex_lock(&priv->gem_lock);
+	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
+		struct drm_gem_object *obj = &etnaviv_obj->base;
+
+		seq_puts(m, "   ");
+		etnaviv_gem_describe(obj, m);
+		count++;
+		size += obj->size;
+	}
+	mutex_unlock(&priv->gem_lock);
+
+	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+	if (etnaviv_obj->vaddr)
+		vunmap(etnaviv_obj->vaddr);
+	put_pages(etnaviv_obj);
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
+	.get_pages = etnaviv_gem_shmem_get_pages,
+	.release = etnaviv_gem_shmem_release,
+};
+
+void etnaviv_gem_free_object(struct drm_gem_object *obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	struct etnaviv_vram_mapping *mapping, *tmp;
+
+	/* object should not be active */
+	WARN_ON(is_active(etnaviv_obj));
+
+	list_del(&etnaviv_obj->gem_node);
+
+	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
+				 obj_node) {
+		struct etnaviv_iommu *mmu = mapping->mmu;
+
+		WARN_ON(mapping->use);
+
+		if (mmu)
+			etnaviv_iommu_unmap_gem(mmu, mapping);
+
+		list_del(&mapping->obj_node);
+		kfree(mapping);
+	}
+
+	drm_gem_free_mmap_offset(obj);
+	etnaviv_obj->ops->release(etnaviv_obj);
+	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
+		reservation_object_fini(&etnaviv_obj->_resv);
+	drm_gem_object_release(obj);
+
+	kfree(etnaviv_obj);
+}
+
+int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+	mutex_lock(&priv->gem_lock);
+	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
+	mutex_unlock(&priv->gem_lock);
+
+	return 0;
+}
+
+static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
+	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
+	struct drm_gem_object **obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj;
+	unsigned sz = sizeof(*etnaviv_obj);
+	bool valid = true;
+
+	/* validate flags */
+	switch (flags & ETNA_BO_CACHE_MASK) {
+	case ETNA_BO_UNCACHED:
+	case ETNA_BO_CACHED:
+	case ETNA_BO_WC:
+		break;
+	default:
+		valid = false;
+	}
+
+	if (!valid) {
+		dev_err(dev->dev, "invalid cache flag: %x\n",
+			(flags & ETNA_BO_CACHE_MASK));
+		return -EINVAL;
+	}
+
+	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
+	if (!etnaviv_obj)
+		return -ENOMEM;
+
+	etnaviv_obj->flags = flags;
+	etnaviv_obj->ops = ops;
+	if (robj) {
+		etnaviv_obj->resv = robj;
+	} else {
+		etnaviv_obj->resv = &etnaviv_obj->_resv;
+		reservation_object_init(&etnaviv_obj->_resv);
+	}
+
+	mutex_init(&etnaviv_obj->lock);
+	INIT_LIST_HEAD(&etnaviv_obj->vram_list);
+
+	*obj = &etnaviv_obj->base;
+
+	return 0;
+}
+
+static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
+		u32 size, u32 flags)
+{
+	struct drm_gem_object *obj = NULL;
+	int ret;
+
+	size = PAGE_ALIGN(size);
+
+	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
+				   &etnaviv_gem_shmem_ops, &obj);
+	if (ret)
+		goto fail;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret == 0) {
+		struct address_space *mapping;
+
+		/*
+		 * Our buffers are kept pinned, so allocating them
+		 * from the MOVABLE zone is a really bad idea, and
+		 * conflicts with CMA.  See the comments above new_inode()
+		 * for why this is required _and_ expected if you're
+		 * going to pin these pages.
+		 */
+		mapping = file_inode(obj->filp)->i_mapping;
+		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+	}
+
+	if (ret)
+		goto fail;
+
+	return obj;
+
+fail:
+	if (obj)
+		drm_gem_object_unreference_unlocked(obj);
+
+	return ERR_PTR(ret);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+		u32 size, u32 flags, u32 *handle)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = __etnaviv_gem_new(dev, size, flags);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	ret = etnaviv_gem_obj_add(dev, obj);
+	if (ret < 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
+	ret = drm_gem_handle_create(file, obj, handle);
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
+		u32 size, u32 flags)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = __etnaviv_gem_new(dev, size, flags);
+	if (IS_ERR(obj))
+		return obj;
+
+	ret = etnaviv_gem_obj_add(dev, obj);
+	if (ret < 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(ret);
+	}
+
+	return obj;
+}
+
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
+	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
+	struct etnaviv_gem_object **res)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
+	if (ret)
+		return ret;
+
+	drm_gem_private_object_init(dev, obj, size);
+
+	*res = to_etnaviv_bo(obj);
+
+	return 0;
+}
+
+struct get_pages_work {
+	struct work_struct work;
+	struct mm_struct *mm;
+	struct task_struct *task;
+	struct etnaviv_gem_object *etnaviv_obj;
+};
+
+static struct page **etnaviv_gem_userptr_do_get_pages(
+	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm,
+	struct task_struct *task)
+{
+	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+	struct page **pvec;
+	uintptr_t ptr;
+
+	pvec = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!pvec)
+		return ERR_PTR(-ENOMEM);
+
+	pinned = 0;
+	ptr = etnaviv_obj->userptr.ptr;
+
+	down_read(&mm->mmap_sem);
+	while (pinned < npages) {
+		ret = get_user_pages(task, mm, ptr, npages - pinned,
+				     !etnaviv_obj->userptr.ro, 0,
+				     pvec + pinned, NULL);
+		if (ret < 0)
+			break;
+
+		ptr += ret * PAGE_SIZE;
+		pinned += ret;
+	}
+	up_read(&mm->mmap_sem);
+
+	if (ret < 0) {
+		release_pages(pvec, pinned, 0);
+		drm_free_large(pvec);
+		return ERR_PTR(ret);
+	}
+
+	return pvec;
+}
+
+static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
+{
+	struct get_pages_work *work = container_of(_work, typeof(*work), work);
+	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
+	struct page **pvec;
+
+	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
+
+	mutex_lock(&etnaviv_obj->lock);
+	if (IS_ERR(pvec)) {
+		etnaviv_obj->userptr.work = ERR_CAST(pvec);
+	} else {
+		etnaviv_obj->userptr.work = NULL;
+		etnaviv_obj->pages = pvec;
+	}
+
+	mutex_unlock(&etnaviv_obj->lock);
+	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+
+	mmput(work->mm);
+	put_task_struct(work->task);
+	kfree(work);
+}
+
+static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+	struct page **pvec = NULL;
+	struct get_pages_work *work;
+	struct mm_struct *mm;
+	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+
+	if (etnaviv_obj->userptr.work) {
+		if (IS_ERR(etnaviv_obj->userptr.work)) {
+			ret = PTR_ERR(etnaviv_obj->userptr.work);
+			etnaviv_obj->userptr.work = NULL;
+		} else {
+			ret = -EAGAIN;
+		}
+		return ret;
+	}
+
+	mm = get_task_mm(etnaviv_obj->userptr.task);
+	pinned = 0;
+	if (mm == current->mm) {
+		pvec = drm_malloc_ab(npages, sizeof(struct page *));
+		if (!pvec) {
+			mmput(mm);
+			return -ENOMEM;
+		}
+
+		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
+					       !etnaviv_obj->userptr.ro, pvec);
+		if (pinned < 0) {
+			drm_free_large(pvec);
+			mmput(mm);
+			return pinned;
+		}
+
+		if (pinned == npages) {
+			etnaviv_obj->pages = pvec;
+			mmput(mm);
+			return 0;
+		}
+	}
+
+	release_pages(pvec, pinned, 0);
+	drm_free_large(pvec);
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (!work) {
+		mmput(mm);
+		return -ENOMEM;
+	}
+
+	get_task_struct(current);
+	drm_gem_object_reference(&etnaviv_obj->base);
+
+	work->mm = mm;
+	work->task = current;
+	work->etnaviv_obj = etnaviv_obj;
+
+	etnaviv_obj->userptr.work = &work->work;
+	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
+
+	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
+
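+	/*
+	 * Tell the caller to retry; userptr.work set above also marks the
+	 * pin as being in progress for the next call into get_pages.
+	 */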
+	return -EAGAIN;
+}
+
+static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+	if (etnaviv_obj->sgt) {
+		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
+		sg_free_table(etnaviv_obj->sgt);
+		kfree(etnaviv_obj->sgt);
+	}
+	if (etnaviv_obj->pages) {
+		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+
+		release_pages(etnaviv_obj->pages, npages, 0);
+		drm_free_large(etnaviv_obj->pages);
+	}
+	put_task_struct(etnaviv_obj->userptr.task);
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
+	.get_pages = etnaviv_gem_userptr_get_pages,
+	.release = etnaviv_gem_userptr_release,
+};
+
+int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
+	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
+{
+	struct etnaviv_gem_object *etnaviv_obj;
+	int ret;
+
+	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
+				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
+	if (ret)
+		return ret;
+
+	etnaviv_obj->userptr.ptr = ptr;
+	etnaviv_obj->userptr.task = current;
+	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
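+	/* hold a task reference so the page worker can call get_task_mm() */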
+	get_task_struct(current);
+
+	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+		return ret;
+	}
+
+	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
new file mode 100644
index 0000000..a300b4b
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_GEM_H__
+#define __ETNAVIV_GEM_H__
+
+#include <linux/reservation.h>
+#include "etnaviv_drv.h"
+
+struct etnaviv_gem_ops;
+struct etnaviv_gem_object;
+
+struct etnaviv_gem_userptr {
+	uintptr_t ptr;
+	struct task_struct *task;
+	struct work_struct *work;
+	bool ro;
+};
+
+struct etnaviv_vram_mapping {
+	struct list_head obj_node;
+	struct list_head scan_node;
+	struct list_head mmu_node;
+	struct etnaviv_gem_object *object;
+	struct etnaviv_iommu *mmu;
+	struct drm_mm_node vram_node;
+	unsigned int use;
+	u32 iova;
+};
+
+struct etnaviv_gem_object {
+	struct drm_gem_object base;
+	const struct etnaviv_gem_ops *ops;
+	struct mutex lock;
+
+	u32 flags;
+
+	struct list_head gem_node;
+	struct etnaviv_gpu *gpu;     /* non-null if active */
+	atomic_t gpu_active;
+	u32 access;
+
+	struct page **pages;
+	struct sg_table *sgt;
+	void *vaddr;
+
+	/* normally (resv == &_resv) except for imported bo's */
+	struct reservation_object *resv;
+	struct reservation_object _resv;
+
+	struct list_head vram_list;
+
+	/* cache maintenance */
+	u32 last_cpu_prep_op;
+
+	struct etnaviv_gem_userptr userptr;
+};
+
+static inline
+struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
+{
+	return container_of(obj, struct etnaviv_gem_object, base);
+}
+
+struct etnaviv_gem_ops {
+	int (*get_pages)(struct etnaviv_gem_object *);
+	void (*release)(struct etnaviv_gem_object *);
+};
+
+static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
+{
+	return atomic_read(&etnaviv_obj->gpu_active) != 0;
+}
+
+#define MAX_CMDS 4
+
+/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
+ * associated with the cmdstream submission for synchronization (and
+ * make it easier to unwind when things go wrong, etc).  This only
+ * lasts for the duration of the submit-ioctl.
+ */
+struct etnaviv_gem_submit {
+	struct drm_device *dev;
+	struct etnaviv_gpu *gpu;
+	struct ww_acquire_ctx ticket;
+	u32 fence;
+	unsigned int nr_bos;
+	struct {
+		u32 flags;
+		struct etnaviv_gem_object *obj;
+		u32 iova;
+	} bos[0];
+};
+
+int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
+	struct timespec *timeout);
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
+	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
+	struct etnaviv_gem_object **res);
+int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
+struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
+void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
+
+#endif /* __ETNAVIV_GEM_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
new file mode 100644
index 0000000..e94db4f
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/dma-buf.h>
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+
+struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+	BUG_ON(!etnaviv_obj->sgt);  /* should have already pinned! */
+
+	return etnaviv_obj->sgt;
+}
+
+void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	return etnaviv_gem_vaddr(obj);
+}
+
+void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	/* TODO msm_gem_vunmap() */
+}
+
+int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
+{
+	if (!obj->import_attach) {
+		struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+		mutex_lock(&etnaviv_obj->lock);
+		etnaviv_gem_get_pages(etnaviv_obj);
+		mutex_unlock(&etnaviv_obj->lock);
+	}
+	return 0;
+}
+
+void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	if (!obj->import_attach) {
+		struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+		mutex_lock(&etnaviv_obj->lock);
+		etnaviv_gem_put_pages(to_etnaviv_bo(obj));
+		mutex_unlock(&etnaviv_obj->lock);
+	}
+}
+
+static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+	if (etnaviv_obj->vaddr)
+		dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
+			       etnaviv_obj->vaddr);
+
+	/* Don't drop the pages for imported dmabuf, as they are not
+	 * ours, just free the array we allocated:
+	 */
+	if (etnaviv_obj->pages)
+		drm_free_large(etnaviv_obj->pages);
+
+	drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
+	/* .get_pages should never be called */
+	.release = etnaviv_gem_prime_release,
+};
+
+struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
+	struct dma_buf_attachment *attach, struct sg_table *sgt)
+{
+	struct etnaviv_gem_object *etnaviv_obj;
+	size_t size = PAGE_ALIGN(attach->dmabuf->size);
+	int ret, npages;
+
+	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
+				      attach->dmabuf->resv,
+				      &etnaviv_gem_prime_ops, &etnaviv_obj);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	npages = size / PAGE_SIZE;
+
+	etnaviv_obj->sgt = sgt;
+	etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!etnaviv_obj->pages) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
+					       NULL, npages);
+	if (ret)
+		goto fail;
+
+	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
+	if (ret)
+		goto fail;
+
+	return &etnaviv_obj->base;
+
+fail:
+	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
new file mode 100644
index 0000000..1aba01a
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/reservation.h>
+#include "etnaviv_drv.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+
+/*
+ * Cmdstream submission:
+ */
+
+#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
+/* make sure these don't conflict w/ ETNA_SUBMIT_BO_x */
+#define BO_LOCKED   0x4000
+#define BO_PINNED   0x2000
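+/*
+ * BO_LOCKED/BO_PINNED are kernel-internal state bits; they share the
+ * per-BO flags word (submit->bos[i].flags) with the userspace
+ * ETNA_SUBMIT_BO_* bits.
+ */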
+
+static inline void __user *to_user_ptr(u64 address)
+{
+	return (void __user *)(uintptr_t)address;
+}
+
+static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
+		struct etnaviv_gpu *gpu, size_t nr)
+{
+	struct etnaviv_gem_submit *submit;
+	size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
+
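+	/*
+	 * size_vstruct() sizes the allocation to cover the submit header
+	 * plus the variable-length bos[] array that trails it.
+	 */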
+	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+	if (submit) {
+		submit->dev = dev;
+		submit->gpu = gpu;
+
+		/* initially, until copy_from_user() and bo lookup succeeds: */
+		submit->nr_bos = 0;
+
+		ww_acquire_init(&submit->ticket, &reservation_ww_class);
+	}
+
+	return submit;
+}
+
+static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
+	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
+	unsigned nr_bos)
+{
+	struct drm_etnaviv_gem_submit_bo *bo;
+	unsigned i;
+	int ret = 0;
+
+	spin_lock(&file->table_lock);
+
+	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
+		struct drm_gem_object *obj;
+
+		if (bo->flags & BO_INVALID_FLAGS) {
+			DRM_ERROR("invalid flags: %x\n", bo->flags);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		submit->bos[i].flags = bo->flags;
+
+		/* normally use drm_gem_object_lookup(), but for bulk lookup
+		 * all under single table_lock just hit object_idr directly:
+		 */
+		obj = idr_find(&file->object_idr, bo->handle);
+		if (!obj) {
+			DRM_ERROR("invalid handle %u at index %u\n",
+				  bo->handle, i);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		/*
+		 * Take a refcount on the object. The file table lock
+		 * prevents the object_idr's refcount on this being dropped.
+		 */
+		drm_gem_object_reference(obj);
+
+		submit->bos[i].obj = to_etnaviv_bo(obj);
+	}
+
+out_unlock:
+	submit->nr_bos = i;
+	spin_unlock(&file->table_lock);
+
+	return ret;
+}
+
+static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
+{
+	if (submit->bos[i].flags & BO_LOCKED) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+		ww_mutex_unlock(&etnaviv_obj->resv->lock);
+		submit->bos[i].flags &= ~BO_LOCKED;
+	}
+}
+
+static int submit_lock_objects(struct etnaviv_gem_submit *submit)
+{
+	int contended, slow_locked = -1, i, ret = 0;
+
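+	/*
+	 * Standard ww_mutex acquire pattern: on -EDEADLK drop all locks,
+	 * take the contended one via the slow path, then retry the whole
+	 * list under the same acquire ticket.
+	 */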
+retry:
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+		if (slow_locked == i)
+			slow_locked = -1;
+
+		contended = i;
+
+		if (!(submit->bos[i].flags & BO_LOCKED)) {
+			ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
+					&submit->ticket);
+			if (ret == -EALREADY)
+				DRM_ERROR("BO at index %u already on submit list\n",
+					  i);
+			if (ret)
+				goto fail;
+			submit->bos[i].flags |= BO_LOCKED;
+		}
+	}
+
+	ww_acquire_done(&submit->ticket);
+
+	return 0;
+
+fail:
+	for (; i >= 0; i--)
+		submit_unlock_object(submit, i);
+
+	if (slow_locked > 0)
+		submit_unlock_object(submit, slow_locked);
+
+	if (ret == -EDEADLK) {
+		struct etnaviv_gem_object *etnaviv_obj;
+
+		etnaviv_obj = submit->bos[contended].obj;
+
+		/* we lost out in a seqno race, lock and retry.. */
+		ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
+				&submit->ticket);
+		if (!ret) {
+			submit->bos[contended].flags |= BO_LOCKED;
+			slow_locked = contended;
+			goto retry;
+		}
+	}
+
+	return ret;
+}
+
+static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
+{
+	unsigned int context = submit->gpu->fence_context;
+	int i, ret = 0;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+
+		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
+{
+	int i;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+		if (submit->bos[i].flags & BO_PINNED)
+			etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base);
+
+		submit->bos[i].iova = 0;
+		submit->bos[i].flags &= ~BO_PINNED;
+	}
+}
+
+static int submit_pin_objects(struct etnaviv_gem_submit *submit)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+		u32 iova;
+
+		ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base,
+					   &iova);
+		if (ret)
+			break;
+
+		submit->bos[i].flags |= BO_PINNED;
+		submit->bos[i].iova = iova;
+	}
+
+	return ret;
+}
+
+static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
+		struct etnaviv_gem_object **obj, u32 *iova)
+{
+	if (idx >= submit->nr_bos) {
+		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+				idx, submit->nr_bos);
+		return -EINVAL;
+	}
+
+	if (obj)
+		*obj = submit->bos[idx].obj;
+	if (iova)
+		*iova = submit->bos[idx].iova;
+
+	return 0;
+}
+
+/* process the reloc's and patch up the cmdstream as needed: */
+static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
+		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
+		u32 nr_relocs)
+{
+	u32 i, last_offset = 0;
+	u32 *ptr = stream;
+	int ret;
+
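+	/* relocs must be sorted by ascending submit_offset */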
+	for (i = 0; i < nr_relocs; i++) {
+		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
+		struct etnaviv_gem_object *bobj;
+		u32 iova, off;
+
+		if (unlikely(r->flags)) {
+			DRM_ERROR("invalid reloc flags\n");
+			return -EINVAL;
+		}
+
+		if (r->submit_offset % 4) {
+			DRM_ERROR("non-aligned reloc offset: %u\n",
+				  r->submit_offset);
+			return -EINVAL;
+		}
+
+		/* offset in dwords: */
+		off = r->submit_offset / 4;
+
+		if (off >= size || off < last_offset) {
+			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+			return -EINVAL;
+		}
+
+		ret = submit_bo(submit, r->reloc_idx, &bobj, &iova);
+		if (ret)
+			return ret;
+
+		if (r->reloc_offset >=
+		    bobj->base.size - sizeof(*ptr)) {
+			DRM_ERROR("relocation %u outside object\n", i);
+			return -EINVAL;
+		}
+
+		ptr[off] = iova + r->reloc_offset;
+
+		last_offset = off;
+	}
+
+	return 0;
+}
+
+static void submit_cleanup(struct etnaviv_gem_submit *submit)
+{
+	unsigned i;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+		submit_unlock_object(submit, i);
+		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+	}
+
+	ww_acquire_fini(&submit->ticket);
+	kfree(submit);
+}
+
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	struct drm_etnaviv_gem_submit *args = data;
+	struct drm_etnaviv_gem_submit_reloc *relocs;
+	struct drm_etnaviv_gem_submit_bo *bos;
+	struct etnaviv_gem_submit *submit;
+	struct etnaviv_cmdbuf *cmdbuf;
+	struct etnaviv_gpu *gpu;
+	void *stream;
+	int ret;
+
+	if (args->pipe >= ETNA_MAX_PIPES)
+		return -EINVAL;
+
+	gpu = priv->gpu[args->pipe];
+	if (!gpu)
+		return -ENXIO;
+
+	if (args->stream_size % 4) {
+		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
+			  args->stream_size);
+		return -EINVAL;
+	}
+
+	if (args->exec_state != ETNA_PIPE_3D &&
+	    args->exec_state != ETNA_PIPE_2D &&
+	    args->exec_state != ETNA_PIPE_VG) {
+		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
+		return -EINVAL;
+	}
+
+	/*
+	 * Copy the command submission and bo array to kernel space in
+	 * one go, and do this outside of any locks.
+	 */
+	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
+	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
+	stream = drm_malloc_ab(1, args->stream_size);
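+	/* reserve 8 extra bytes for the LINK the kernel appends at the end */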
+	cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
+					args->nr_bos);
+	if (!bos || !relocs || !stream || !cmdbuf) {
+		ret = -ENOMEM;
+		goto err_submit_cmds;
+	}
+
+	cmdbuf->exec_state = args->exec_state;
+	cmdbuf->ctx = file->driver_priv;
+
+	ret = copy_from_user(bos, to_user_ptr(args->bos),
+			     args->nr_bos * sizeof(*bos));
+	if (ret) {
+		ret = -EFAULT;
+		goto err_submit_cmds;
+	}
+
+	ret = copy_from_user(relocs, to_user_ptr(args->relocs),
+			     args->nr_relocs * sizeof(*relocs));
+	if (ret) {
+		ret = -EFAULT;
+		goto err_submit_cmds;
+	}
+
+	ret = copy_from_user(stream, to_user_ptr(args->stream),
+			     args->stream_size);
+	if (ret) {
+		ret = -EFAULT;
+		goto err_submit_cmds;
+	}
+
+	submit = submit_create(dev, gpu, args->nr_bos);
+	if (!submit) {
+		ret = -ENOMEM;
+		goto err_submit_cmds;
+	}
+
+	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
+	if (ret)
+		goto err_submit_objects;
+
+	ret = submit_lock_objects(submit);
+	if (ret)
+		goto err_submit_objects;
+
+	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
+				      relocs, args->nr_relocs)) {
+		ret = -EINVAL;
+		goto err_submit_objects;
+	}
+
+	ret = submit_fence_sync(submit);
+	if (ret)
+		goto err_submit_objects;
+
+	ret = submit_pin_objects(submit);
+	if (ret)
+		goto out;
+
+	ret = submit_reloc(submit, stream, args->stream_size / 4,
+			   relocs, args->nr_relocs);
+	if (ret)
+		goto out;
+
+	memcpy(cmdbuf->vaddr, stream, args->stream_size);
+	cmdbuf->user_size = ALIGN(args->stream_size, 8);
+
+	ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
+	if (ret == 0)
+		cmdbuf = NULL;
+
+	args->fence = submit->fence;
+
+out:
+	submit_unpin_objects(submit);
+
+	/*
+	 * If we're returning -EAGAIN, it may be due to the userptr code
+	 * wanting to run its workqueue outside of any locks. Flush our
+	 * workqueue to ensure that it is run in a timely manner.
+	 */
+	if (ret == -EAGAIN)
+		flush_workqueue(priv->wq);
+
+err_submit_objects:
+	submit_cleanup(submit);
+
+err_submit_cmds:
+	/* if we still own the cmdbuf */
+	if (cmdbuf)
+		etnaviv_gpu_cmdbuf_free(cmdbuf);
+	if (stream)
+		drm_free_large(stream);
+	if (bos)
+		drm_free_large(bos);
+	if (relocs)
+		drm_free_large(relocs);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
new file mode 100644
index 0000000..d39093d
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -0,0 +1,1644 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/component.h>
+#include <linux/fence.h>
+#include <linux/moduleparam.h>
+#include <linux/of_device.h>
+#include "etnaviv_dump.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_iommu.h"
+#include "etnaviv_iommu_v2.h"
+#include "common.xml.h"
+#include "state.xml.h"
+#include "state_hi.xml.h"
+#include "cmdstream.xml.h"
+
+static const struct platform_device_id gpu_ids[] = {
+	{ .name = "etnaviv-gpu,2d" },
+	{ },
+};
+
+static bool etnaviv_dump_core = true;
+module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
+
+/*
+ * Driver functions:
+ */
+
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
+{
+	switch (param) {
+	case ETNAVIV_PARAM_GPU_MODEL:
+		*value = gpu->identity.model;
+		break;
+
+	case ETNAVIV_PARAM_GPU_REVISION:
+		*value = gpu->identity.revision;
+		break;
+
+	case ETNAVIV_PARAM_GPU_FEATURES_0:
+		*value = gpu->identity.features;
+		break;
+
+	case ETNAVIV_PARAM_GPU_FEATURES_1:
+		*value = gpu->identity.minor_features0;
+		break;
+
+	case ETNAVIV_PARAM_GPU_FEATURES_2:
+		*value = gpu->identity.minor_features1;
+		break;
+
+	case ETNAVIV_PARAM_GPU_FEATURES_3:
+		*value = gpu->identity.minor_features2;
+		break;
+
+	case ETNAVIV_PARAM_GPU_FEATURES_4:
+		*value = gpu->identity.minor_features3;
+		break;
+
+	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
+		*value = gpu->identity.stream_count;
+		break;
+
+	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
+		*value = gpu->identity.register_max;
+		break;
+
+	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
+		*value = gpu->identity.thread_count;
+		break;
+
+	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
+		*value = gpu->identity.vertex_cache_size;
+		break;
+
+	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
+		*value = gpu->identity.shader_core_count;
+		break;
+
+	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
+		*value = gpu->identity.pixel_pipes;
+		break;
+
+	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
+		*value = gpu->identity.vertex_output_buffer_size;
+		break;
+
+	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
+		*value = gpu->identity.buffer_size;
+		break;
+
+	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
+		*value = gpu->identity.instruction_count;
+		break;
+
+	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
+		*value = gpu->identity.num_constants;
+		break;
+
+	default:
+		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
+{
+	if (gpu->identity.minor_features0 &
+	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
+		u32 specs[2];
+
+		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
+		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
+
+		gpu->identity.stream_count =
+			(specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
+				>> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
+		gpu->identity.register_max =
+			(specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
+				>> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
+		gpu->identity.thread_count =
+			(specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
+				>> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
+		gpu->identity.vertex_cache_size =
+			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
+				>> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
+		gpu->identity.shader_core_count =
+			(specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
+				>> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
+		gpu->identity.pixel_pipes =
+			(specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
+				>> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
+		gpu->identity.vertex_output_buffer_size =
+			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
+				>> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;
+
+		gpu->identity.buffer_size =
+			(specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
+				>> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
+		gpu->identity.instruction_count =
+			(specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
+				>> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
+		gpu->identity.num_constants =
+			(specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
+				>> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
+	}
+
+	/* Fill in the stream count if not specified */
+	if (gpu->identity.stream_count == 0) {
+		if (gpu->identity.model >= 0x1000)
+			gpu->identity.stream_count = 4;
+		else
+			gpu->identity.stream_count = 1;
+	}
+
+	/* Convert the register max value */
+	if (gpu->identity.register_max)
+		gpu->identity.register_max = 1 << gpu->identity.register_max;
+	else if (gpu->identity.model == 0x0400)
+		gpu->identity.register_max = 32;
+	else
+		gpu->identity.register_max = 64;
+
+	/* Convert thread count */
+	if (gpu->identity.thread_count)
+		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
+	else if (gpu->identity.model == 0x0400)
+		gpu->identity.thread_count = 64;
+	else if (gpu->identity.model == 0x0500 ||
+		 gpu->identity.model == 0x0530)
+		gpu->identity.thread_count = 128;
+	else
+		gpu->identity.thread_count = 256;
+
+	if (gpu->identity.vertex_cache_size == 0)
+		gpu->identity.vertex_cache_size = 8;
+
+	if (gpu->identity.shader_core_count == 0) {
+		if (gpu->identity.model >= 0x1000)
+			gpu->identity.shader_core_count = 2;
+		else
+			gpu->identity.shader_core_count = 1;
+	}
+
+	if (gpu->identity.pixel_pipes == 0)
+		gpu->identity.pixel_pipes = 1;
+
+	/* Convert the vertex output buffer size */
+	if (gpu->identity.vertex_output_buffer_size) {
+		gpu->identity.vertex_output_buffer_size =
+			1 << gpu->identity.vertex_output_buffer_size;
+	} else if (gpu->identity.model == 0x0400) {
+		if (gpu->identity.revision < 0x4000)
+			gpu->identity.vertex_output_buffer_size = 512;
+		else if (gpu->identity.revision < 0x4200)
+			gpu->identity.vertex_output_buffer_size = 256;
+		else
+			gpu->identity.vertex_output_buffer_size = 128;
+	} else {
+		gpu->identity.vertex_output_buffer_size = 512;
+	}
+
+	switch (gpu->identity.instruction_count) {
+	case 0:
+		if ((gpu->identity.model == 0x2000 &&
+		     gpu->identity.revision == 0x5108) ||
+		    gpu->identity.model == 0x880)
+			gpu->identity.instruction_count = 512;
+		else
+			gpu->identity.instruction_count = 256;
+		break;
+
+	case 1:
+		gpu->identity.instruction_count = 1024;
+		break;
+
+	case 2:
+		gpu->identity.instruction_count = 2048;
+		break;
+
+	default:
+		gpu->identity.instruction_count = 256;
+		break;
+	}
+
+	if (gpu->identity.num_constants == 0)
+		gpu->identity.num_constants = 168;
+}
+
+static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+{
+	u32 chipIdentity;
+
+	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
+
+	/* Special case for older graphics cores. */
+	if (VIVS_HI_CHIP_IDENTITY_FAMILY(chipIdentity) == 0x01) {
+		gpu->identity.model    = 0x500; /* gc500 */
+		gpu->identity.revision = VIVS_HI_CHIP_IDENTITY_REVISION(chipIdentity);
+	} else {
+
+		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
+		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
+
+		/*
+		 * !!!! HACK ALERT !!!!
+		 * Because people change device IDs without letting software
+		 * know about it - here is the hack to make it all look the
+		 * same.  Only for GC400 family.
+		 */
+		if ((gpu->identity.model & 0xff00) == 0x0400 &&
+		    gpu->identity.model != 0x0420) {
+			gpu->identity.model = gpu->identity.model & 0x0400;
+		}
+
+		/* Another special case */
+		if (gpu->identity.model == 0x300 &&
+		    gpu->identity.revision == 0x2201) {
+			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
+			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
+
+			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
+				/*
+				 * This IP has an ECO; put the correct
+				 * revision in it.
+				 */
+				gpu->identity.revision = 0x1051;
+			}
+		}
+	}
+
+	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
+		 gpu->identity.model, gpu->identity.revision);
+
+	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
+
+	/* Disable fast clear on GC700. */
+	if (gpu->identity.model == 0x700)
+		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
+
+	if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
+	    (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
+
+		/*
+		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
+		 * registers.
+		 */
+		gpu->identity.minor_features0 = 0;
+		gpu->identity.minor_features1 = 0;
+		gpu->identity.minor_features2 = 0;
+		gpu->identity.minor_features3 = 0;
+	} else
+		gpu->identity.minor_features0 =
+				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
+
+	if (gpu->identity.minor_features0 &
+	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
+		gpu->identity.minor_features1 =
+				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
+		gpu->identity.minor_features2 =
+				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
+		gpu->identity.minor_features3 =
+				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
+	}
+
+	/* GC600 idle register reports zero bits where modules aren't present */
+	if (gpu->identity.model == chipModel_GC600) {
+		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
+				 VIVS_HI_IDLE_STATE_RA |
+				 VIVS_HI_IDLE_STATE_SE |
+				 VIVS_HI_IDLE_STATE_PA |
+				 VIVS_HI_IDLE_STATE_SH |
+				 VIVS_HI_IDLE_STATE_PE |
+				 VIVS_HI_IDLE_STATE_DE |
+				 VIVS_HI_IDLE_STATE_FE;
+	} else {
+		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
+	}
+
+	etnaviv_hw_specs(gpu);
+}
+
+static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
+{
+	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
+		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
+	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
+}
+
+static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
+{
+	u32 control, idle;
+	unsigned long timeout;
+	bool failed = true;
+
+	/* TODO
+	 *
+	 * - clock gating
+	 * - pulse eater
+	 * - what about VG?
+	 */
+
+	/* We hope that the GPU resets in under one second */
+	timeout = jiffies + msecs_to_jiffies(1000);
+
+	while (time_is_after_jiffies(timeout)) {
+		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
+
+		/* enable clock */
+		etnaviv_gpu_load_clock(gpu, control);
+
+		/* Wait for stable clock.  Vivante's code waited for 1ms */
+		usleep_range(1000, 10000);
+
+		/* isolate the GPU. */
+		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
+		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+		/* set soft reset. */
+		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
+		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+		/* wait for reset. */
+		msleep(1);
+
+		/* reset soft reset bit. */
+		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
+		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+		/* reset GPU isolation. */
+		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
+		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+		/* read idle register. */
+		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+		/* try resetting again if the FE is not idle */
+		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
+			dev_dbg(gpu->dev, "FE is not idle\n");
+			continue;
+		}
+
+		/* read reset register. */
+		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+
+		/* is the GPU idle? */
+		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
+		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
+			dev_dbg(gpu->dev, "GPU is not idle\n");
+			continue;
+		}
+
+		failed = false;
+		break;
+	}
+
+	if (failed) {
+		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+
+		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
+			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
+			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
+			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
+
+		return -EBUSY;
+	}
+
+	/* We rely on the GPU running, so program the clock */
+	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
+
+	/* enable clock */
+	etnaviv_gpu_load_clock(gpu, control);
+
+	return 0;
+}
+
+static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
+{
+	u16 prefetch;
+
+	if (gpu->identity.model == chipModel_GC320 &&
+	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
+	    (gpu->identity.revision == 0x5007 ||
+	     gpu->identity.revision == 0x5220)) {
+		u32 mc_memory_debug;
+
+		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
+
+		if (gpu->identity.revision == 0x5007)
+			mc_memory_debug |= 0x0c;
+		else
+			mc_memory_debug |= 0x08;
+
+		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
+	}
+
+	/*
+	 * Update GPU AXI cache attribute to "cacheable, no allocate".
+	 * This is necessary to prevent the iMX6 SoC locking up.
+	 */
+	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
+		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
+		  VIVS_HI_AXI_CONFIG_ARCACHE(2));
+
+	/* GC2000 rev 5108 needs a special bus config */
+	if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
+		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
+
+		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
+				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
+		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
+			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
+		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
+	}
+
+	/* set base addresses */
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
+
+	/* setup the MMU page table pointers */
+	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
+
+	/* Start command processor */
+	prefetch = etnaviv_buffer_init(gpu);
+
+	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
+	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
+		  gpu->buffer->paddr - gpu->memory_base);
+	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
+		  VIVS_FE_COMMAND_CONTROL_ENABLE |
+		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+}
+
+int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+{
+	int ret, i;
+	struct iommu_domain *iommu;
+	enum etnaviv_iommu_version version;
+	bool mmuv2;
+
+	ret = pm_runtime_get_sync(gpu->dev);
+	if (ret < 0)
+		return ret;
+
+	etnaviv_hw_identify(gpu);
+
+	if (gpu->identity.model == 0) {
+		dev_err(gpu->dev, "Unknown GPU model\n");
+		pm_runtime_put_autosuspend(gpu->dev);
+		return -ENXIO;
+	}
+
+	ret = etnaviv_hw_reset(gpu);
+	if (ret)
+		goto fail;
+
+	/* Setup IOMMU.. eventually we will (I think) do this once per context
+	 * and have separate page tables per context.  For now, to keep things
+	 * simple and to get something working, just use a single address space:
+	 */
+	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
+	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
+
+	if (!mmuv2) {
+		iommu = etnaviv_iommu_domain_alloc(gpu);
+		version = ETNAVIV_IOMMU_V1;
+	} else {
+		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
+		version = ETNAVIV_IOMMU_V2;
+	}
+
+	if (!iommu) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* TODO: we will leak memory here - fix it! */
+
+	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
+	if (!gpu->mmu) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* Create buffer: */
+	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
+	if (!gpu->buffer) {
+		ret = -ENOMEM;
+		dev_err(gpu->dev, "could not create command buffer\n");
+		goto fail;
+	}
+	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
+		ret = -EINVAL;
+		dev_err(gpu->dev,
+			"command buffer outside valid memory window\n");
+		goto free_buffer;
+	}
+
+	/* Setup event management */
+	spin_lock_init(&gpu->event_spinlock);
+	init_completion(&gpu->event_free);
+	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
+		gpu->event[i].used = false;
+		complete(&gpu->event_free);
+	}
+
+	/* Now program the hardware */
+	mutex_lock(&gpu->lock);
+	etnaviv_gpu_hw_init(gpu);
+	mutex_unlock(&gpu->lock);
+
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
+
+	return 0;
+
+free_buffer:
+	etnaviv_gpu_cmdbuf_free(gpu->buffer);
+	gpu->buffer = NULL;
+fail:
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
+
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct dma_debug {
+	u32 address[2];
+	u32 state[2];
+};
+
+static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
+{
+	u32 i;
+
+	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+	debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
+
+	for (i = 0; i < 500; i++) {
+		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+		debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
+
+		if (debug->address[0] != debug->address[1])
+			break;
+
+		if (debug->state[0] != debug->state[1])
+			break;
+	}
+}
+
+int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+	struct dma_debug debug;
+	u32 dma_lo, dma_hi, axi, idle;
+	int ret;
+
+	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
+
+	ret = pm_runtime_get_sync(gpu->dev);
+	if (ret < 0)
+		return ret;
+
+	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
+	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
+	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
+	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+	verify_dma(gpu, &debug);
+
+	seq_puts(m, "\tfeatures\n");
+	seq_printf(m, "\t minor_features0: 0x%08x\n",
+		   gpu->identity.minor_features0);
+	seq_printf(m, "\t minor_features1: 0x%08x\n",
+		   gpu->identity.minor_features1);
+	seq_printf(m, "\t minor_features2: 0x%08x\n",
+		   gpu->identity.minor_features2);
+	seq_printf(m, "\t minor_features3: 0x%08x\n",
+		   gpu->identity.minor_features3);
+
+	seq_puts(m, "\tspecs\n");
+	seq_printf(m, "\t stream_count:  %d\n",
+			gpu->identity.stream_count);
+	seq_printf(m, "\t register_max: %d\n",
+			gpu->identity.register_max);
+	seq_printf(m, "\t thread_count: %d\n",
+			gpu->identity.thread_count);
+	seq_printf(m, "\t vertex_cache_size: %d\n",
+			gpu->identity.vertex_cache_size);
+	seq_printf(m, "\t shader_core_count: %d\n",
+			gpu->identity.shader_core_count);
+	seq_printf(m, "\t pixel_pipes: %d\n",
+			gpu->identity.pixel_pipes);
+	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
+			gpu->identity.vertex_output_buffer_size);
+	seq_printf(m, "\t buffer_size: %d\n",
+			gpu->identity.buffer_size);
+	seq_printf(m, "\t instruction_count: %d\n",
+			gpu->identity.instruction_count);
+	seq_printf(m, "\t num_constants: %d\n",
+			gpu->identity.num_constants);
+
+	seq_printf(m, "\taxi: 0x%08x\n", axi);
+	seq_printf(m, "\tidle: 0x%08x\n", idle);
+	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
+	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
+		seq_puts(m, "\t FE is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
+		seq_puts(m, "\t DE is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
+		seq_puts(m, "\t PE is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
+		seq_puts(m, "\t SH is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
+		seq_puts(m, "\t PA is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
+		seq_puts(m, "\t SE is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
+		seq_puts(m, "\t RA is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
+		seq_puts(m, "\t TX is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
+		seq_puts(m, "\t VG is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
+		seq_puts(m, "\t IM is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
+		seq_puts(m, "\t FP is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
+		seq_puts(m, "\t TS is not idle\n");
+	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
+		seq_puts(m, "\t AXI low power mode\n");
+
+	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
+		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
+		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
+		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
+
+		seq_puts(m, "\tMC\n");
+		seq_printf(m, "\t read0: 0x%08x\n", read0);
+		seq_printf(m, "\t read1: 0x%08x\n", read1);
+		seq_printf(m, "\t write: 0x%08x\n", write);
+	}
+
+	seq_puts(m, "\tDMA ");
+
+	if (debug.address[0] == debug.address[1] &&
+	    debug.state[0] == debug.state[1]) {
+		seq_puts(m, "seems to be stuck\n");
+	} else if (debug.address[0] == debug.address[1]) {
+		seq_puts(m, "address is constant\n");
+	} else {
+		seq_puts(m, "is running\n");
+	}
+
+	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
+	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
+	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
+	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
+	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
+		   dma_lo, dma_hi);
+
+	ret = 0;
+
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
+
+	return ret;
+}
+#endif
+
+/*
+ * Power Management:
+ */
+static int enable_clk(struct etnaviv_gpu *gpu)
+{
+	if (gpu->clk_core)
+		clk_prepare_enable(gpu->clk_core);
+	if (gpu->clk_shader)
+		clk_prepare_enable(gpu->clk_shader);
+
+	return 0;
+}
+
+static int disable_clk(struct etnaviv_gpu *gpu)
+{
+	if (gpu->clk_core)
+		clk_disable_unprepare(gpu->clk_core);
+	if (gpu->clk_shader)
+		clk_disable_unprepare(gpu->clk_shader);
+
+	return 0;
+}
+
+static int enable_axi(struct etnaviv_gpu *gpu)
+{
+	if (gpu->clk_bus)
+		clk_prepare_enable(gpu->clk_bus);
+
+	return 0;
+}
+
+static int disable_axi(struct etnaviv_gpu *gpu)
+{
+	if (gpu->clk_bus)
+		clk_disable_unprepare(gpu->clk_bus);
+
+	return 0;
+}
+
+/*
+ * Hangcheck detection for locked gpu:
+ */
+static void recover_worker(struct work_struct *work)
+{
+	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+					       recover_work);
+	unsigned long flags;
+	unsigned int i;
+
+	dev_err(gpu->dev, "hangcheck recover!\n");
+
+	if (pm_runtime_get_sync(gpu->dev) < 0)
+		return;
+
+	mutex_lock(&gpu->lock);
+
+	/* Only catch the first event, or when manually re-armed */
+	if (etnaviv_dump_core) {
+		etnaviv_core_dump(gpu);
+		etnaviv_dump_core = false;
+	}
+
+	etnaviv_hw_reset(gpu);
+
+	/* complete all events, the GPU won't do it after the reset */
+	spin_lock_irqsave(&gpu->event_spinlock, flags);
+	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
+		if (!gpu->event[i].used)
+			continue;
+		fence_signal(gpu->event[i].fence);
+		gpu->event[i].fence = NULL;
+		gpu->event[i].used = false;
+		complete(&gpu->event_free);
+		/*
+		 * Decrement the PM count for each stuck event. This is safe
+		 * even in atomic context as we use ASYNC RPM here.
+		 */
+		pm_runtime_put_autosuspend(gpu->dev);
+	}
+	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+	gpu->completed_fence = gpu->active_fence;
+
+	etnaviv_gpu_hw_init(gpu);
+	gpu->switch_context = true;
+
+	mutex_unlock(&gpu->lock);
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
+
+	/* Retire the buffer objects in a work */
+	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
+}
+
+static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
+{
+	DBG("%s", dev_name(gpu->dev));
+	mod_timer(&gpu->hangcheck_timer,
+		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
+}
+
+static void hangcheck_handler(unsigned long data)
+{
+	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
+	u32 fence = gpu->completed_fence;
+	bool progress = false;
+
+	if (fence != gpu->hangcheck_fence) {
+		gpu->hangcheck_fence = fence;
+		progress = true;
+	}
+
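+	/*
+	 * Fences may not advance while the FE chews on a long command
+	 * buffer, so also treat a DMA address that moved by more than a
+	 * few words as forward progress.
+	 */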
+	if (!progress) {
+		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+		int change = dma_addr - gpu->hangcheck_dma_addr;
+
+		if (change < 0 || change > 16) {
+			gpu->hangcheck_dma_addr = dma_addr;
+			progress = true;
+		}
+	}
+
+	if (!progress && fence_after(gpu->active_fence, fence)) {
+		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
+		dev_err(gpu->dev, "     completed fence: %u\n", fence);
+		dev_err(gpu->dev, "     active fence: %u\n",
+			gpu->active_fence);
+		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
+	}
+
+	/* if still more pending work, reset the hangcheck timer: */
+	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
+		hangcheck_timer_reset(gpu);
+}
+
+static void hangcheck_disable(struct etnaviv_gpu *gpu)
+{
+	del_timer_sync(&gpu->hangcheck_timer);
+	cancel_work_sync(&gpu->recover_work);
+}
+
+/* fence object management */
+struct etnaviv_fence {
+	struct etnaviv_gpu *gpu;
+	struct fence base;
+};
+
+static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+{
+	return container_of(fence, struct etnaviv_fence, base);
+}
+
+static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+{
+	return "etnaviv";
+}
+
+static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+{
+	struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+	return dev_name(f->gpu->dev);
+}
+
+static bool etnaviv_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static bool etnaviv_fence_signaled(struct fence *fence)
+{
+	struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+	return fence_completed(f->gpu, f->base.seqno);
+}
+
+static void etnaviv_fence_release(struct fence *fence)
+{
+	struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+	kfree_rcu(f, base.rcu);
+}
+
+static const struct fence_ops etnaviv_fence_ops = {
+	.get_driver_name = etnaviv_fence_get_driver_name,
+	.get_timeline_name = etnaviv_fence_get_timeline_name,
+	.enable_signaling = etnaviv_fence_enable_signaling,
+	.signaled = etnaviv_fence_signaled,
+	.wait = fence_default_wait,
+	.release = etnaviv_fence_release,
+};
+
+static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_fence *f;
+
+	f = kzalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	f->gpu = gpu;
+
+	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+		   gpu->fence_context, ++gpu->next_fence);
+
+	return &f->base;
+}
+
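+/*
+ * Order a submit against the other users of a BO's reservation object:
+ * wait on a foreign exclusive fence when no shared fences are present,
+ * and, when exclusive access is required, on all foreign shared fences
+ * as well.
+ */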
+int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
+	unsigned int context, bool exclusive)
+{
+	struct reservation_object *robj = etnaviv_obj->resv;
+	struct reservation_object_list *fobj;
+	struct fence *fence;
+	int i, ret;
+
+	if (!exclusive) {
+		ret = reservation_object_reserve_shared(robj);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * If we have any shared fences, then the exclusive fence
+	 * should be ignored as it will already have been signalled.
+	 */
+	fobj = reservation_object_get_list(robj);
+	if (!fobj || fobj->shared_count == 0) {
+		/* Wait on any existing exclusive fence which isn't our own */
+		fence = reservation_object_get_excl(robj);
+		if (fence && fence->context != context) {
+			ret = fence_wait(fence, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	if (!exclusive || !fobj)
+		return 0;
+
+	for (i = 0; i < fobj->shared_count; i++) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(robj));
+		if (fence->context != context) {
+			ret = fence_wait(fence, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * event management:
+ */
+
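+/*
+ * Claim a free hardware event slot, waiting for one to be released if
+ * necessary.  Returns the event index, or ~0U if none was free.
+ */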
+static unsigned int event_alloc(struct etnaviv_gpu *gpu)
+{
+	unsigned long ret, flags;
+	unsigned int i, event = ~0U;
+
+	ret = wait_for_completion_timeout(&gpu->event_free,
+					  msecs_to_jiffies(10 * 10000));
+	if (!ret)
+		dev_err(gpu->dev, "wait_for_completion_timeout failed\n");
+
+	spin_lock_irqsave(&gpu->event_spinlock, flags);
+
+	/* find first free event */
+	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
+		if (!gpu->event[i].used) {
+			gpu->event[i].used = true;
+			event = i;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+
+	return event;
+}
+
+static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&gpu->event_spinlock, flags);
+
+	if (!gpu->event[event].used) {
+		dev_warn(gpu->dev, "event %u is already marked as free",
+			 event);
+		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+	} else {
+		gpu->event[event].used = false;
+		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+
+		complete(&gpu->event_free);
+	}
+}
+
+/*
+ * Cmdstream submission/retirement:
+ */
+
+struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
+	size_t nr_bos)
+{
+	struct etnaviv_cmdbuf *cmdbuf;
+	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
+				 sizeof(*cmdbuf));
+
+	cmdbuf = kzalloc(sz, GFP_KERNEL);
+	if (!cmdbuf)
+		return NULL;
+
+	cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
+					       GFP_KERNEL);
+	if (!cmdbuf->vaddr) {
+		kfree(cmdbuf);
+		return NULL;
+	}
+
+	cmdbuf->gpu = gpu;
+	cmdbuf->size = size;
+
+	return cmdbuf;
+}
+
+void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
+{
+	dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
+			      cmdbuf->vaddr, cmdbuf->paddr);
+	kfree(cmdbuf);
+}
+
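+/*
+ * Work item: free command buffers whose fences have signalled, dropping
+ * the IOVA references and gpu_active counts taken at submit time, then
+ * wake up anyone waiting on the fence event.
+ */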
+static void retire_worker(struct work_struct *work)
+{
+	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+					       retire_work);
+	u32 fence = gpu->completed_fence;
+	struct etnaviv_cmdbuf *cmdbuf, *tmp;
+	unsigned int i;
+
+	mutex_lock(&gpu->lock);
+	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
+		if (!fence_is_signaled(cmdbuf->fence))
+			break;
+
+		list_del(&cmdbuf->node);
+		fence_put(cmdbuf->fence);
+
+		for (i = 0; i < cmdbuf->nr_bos; i++) {
+			struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];
+
+			atomic_dec(&etnaviv_obj->gpu_active);
+			/* drop the refcount taken in etnaviv_gpu_submit */
+			etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
+		}
+
+		etnaviv_gpu_cmdbuf_free(cmdbuf);
+	}
+
+	gpu->retired_fence = fence;
+
+	mutex_unlock(&gpu->lock);
+
+	wake_up_all(&gpu->fence_event);
+}
+
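+/*
+ * Wait for a fence seqno to complete.  A NULL timeout means a
+ * non-blocking poll; otherwise, wait interruptibly until the fence
+ * completes or the timeout expires.
+ */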
+int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
+	u32 fence, struct timespec *timeout)
+{
+	int ret;
+
+	if (fence_after(fence, gpu->next_fence)) {
+		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+				fence, gpu->next_fence);
+		return -EINVAL;
+	}
+
+	if (!timeout) {
+		/* No timeout was requested: just test for completion */
+		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
+	} else {
+		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
+
+		ret = wait_event_interruptible_timeout(gpu->fence_event,
+						fence_completed(gpu, fence),
+						remaining);
+		if (ret == 0) {
+			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
+				fence, gpu->retired_fence,
+				gpu->completed_fence);
+			ret = -ETIMEDOUT;
+		} else if (ret != -ERESTARTSYS) {
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Wait for an object to become inactive.  This, on its own, is not race
+ * free: the object is moved by the retire worker off the active list, and
+ * then the iova is put.  Moreover, the object could be re-submitted just
+ * after we notice that it's become inactive.
+ *
+ * Although the retirement happens under the gpu lock, we don't want to hold
+ * that lock in this function while waiting.
+ */
+int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
+	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
+{
+	unsigned long remaining;
+	long ret;
+
+	if (!timeout)
+		return !is_active(etnaviv_obj) ? 0 : -EBUSY;
+
+	remaining = etnaviv_timeout_to_jiffies(timeout);
+
+	ret = wait_event_interruptible_timeout(gpu->fence_event,
+					       !is_active(etnaviv_obj),
+					       remaining);
+	if (ret > 0) {
+		struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+
+		/* Synchronise with the retire worker */
+		flush_workqueue(priv->wq);
+		return 0;
+	} else if (ret == -ERESTARTSYS) {
+		return -ERESTARTSYS;
+	} else {
+		return -ETIMEDOUT;
+	}
+}
+
+int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
+{
+	return pm_runtime_get_sync(gpu->dev);
+}
+
+void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
+{
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
+}
+
+/* add bo's to gpu's ring, and kick gpu: */
+int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
+	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
+{
+	struct fence *fence;
+	unsigned int event, i;
+	int ret;
+
+	ret = etnaviv_gpu_pm_get_sync(gpu);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&gpu->lock);
+
+	/*
+	 * TODO
+	 *
+	 * - flush
+	 * - data endian
+	 * - prefetch
+	 *
+	 */
+
+	event = event_alloc(gpu);
+	if (unlikely(event == ~0U)) {
+		DRM_ERROR("no free event\n");
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
+	fence = etnaviv_gpu_fence_alloc(gpu);
+	if (!fence) {
+		event_free(gpu, event);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	gpu->event[event].fence = fence;
+	submit->fence = fence->seqno;
+	gpu->active_fence = submit->fence;
+
+	if (gpu->lastctx != cmdbuf->ctx) {
+		gpu->mmu->need_flush = true;
+		gpu->switch_context = true;
+		gpu->lastctx = cmdbuf->ctx;
+	}
+
+	etnaviv_buffer_queue(gpu, event, cmdbuf);
+
+	cmdbuf->fence = fence;
+	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
+
+	/* We're committed to adding this command buffer, hold a PM reference */
+	pm_runtime_get_noresume(gpu->dev);
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+		u32 iova;
+
+		/* Each cmdbuf takes a refcount on the iova */
+		etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
+		cmdbuf->bo[i] = etnaviv_obj;
+		atomic_inc(&etnaviv_obj->gpu_active);
+
+		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
+			reservation_object_add_excl_fence(etnaviv_obj->resv,
+							  fence);
+		else
+			reservation_object_add_shared_fence(etnaviv_obj->resv,
+							    fence);
+	}
+	cmdbuf->nr_bos = submit->nr_bos;
+	hangcheck_timer_reset(gpu);
+	ret = 0;
+
+out_unlock:
+	mutex_unlock(&gpu->lock);
+
+	etnaviv_gpu_pm_put(gpu);
+
+	return ret;
+}
+
+/*
+ * Init/Cleanup:
+ */
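+/*
+ * Interrupt handler: acknowledge pending events, signal their fences,
+ * advance the completed fence seqno (events can complete out of order),
+ * drop each event's runtime PM reference, and hand buffer retirement
+ * off to the retire worker.
+ */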
+static irqreturn_t irq_handler(int irq, void *data)
+{
+	struct etnaviv_gpu *gpu = data;
+	irqreturn_t ret = IRQ_NONE;
+
+	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
+
+	if (intr != 0) {
+		int event;
+
+		pm_runtime_mark_last_busy(gpu->dev);
+
+		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
+
+		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
+			dev_err(gpu->dev, "AXI bus error\n");
+			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
+		}
+
+		while ((event = ffs(intr)) != 0) {
+			struct fence *fence;
+
+			event -= 1;
+
+			intr &= ~(1 << event);
+
+			dev_dbg(gpu->dev, "event %u\n", event);
+
+			fence = gpu->event[event].fence;
+			gpu->event[event].fence = NULL;
+			fence_signal(fence);
+
+			/*
+			 * Events can be processed out of order.  Eg,
+			 * - allocate and queue event 0
+			 * - allocate event 1
+			 * - event 0 completes, we process it
+			 * - allocate and queue event 0
+			 * - event 1 and event 0 complete
+			 * we can end up processing event 0 first, then 1.
+			 */
+			if (fence_after(fence->seqno, gpu->completed_fence))
+				gpu->completed_fence = fence->seqno;
+
+			event_free(gpu, event);
+
+			/*
+			 * We need to balance the runtime PM count caused by
+			 * each submission.  Upon submission, we increment
+			 * the runtime PM counter, and allocate one event.
+			 * So here, we put the runtime PM count for each
+			 * completed event.
+			 */
+			pm_runtime_put_autosuspend(gpu->dev);
+		}
+
+		/* Retire the buffer objects in a work item */
+		etnaviv_queue_work(gpu->drm, &gpu->retire_work);
+
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
+{
+	int ret;
+
+	ret = enable_clk(gpu);
+	if (ret)
+		return ret;
+
+	ret = enable_axi(gpu);
+	if (ret) {
+		disable_clk(gpu);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
+{
+	int ret;
+
+	ret = disable_axi(gpu);
+	if (ret)
+		return ret;
+
+	ret = disable_clk(gpu);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
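+/*
+ * Quiesce the GPU before gating the clocks: replace the ring's final
+ * WAIT with an END and briefly poll for the idle state.
+ */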
+static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+{
+	if (gpu->buffer) {
+		unsigned long timeout;
+
+		/* Replace the last WAIT with END */
+		etnaviv_buffer_end(gpu);
+
+		/*
+		 * We know that only the FE is busy here, this should
+		 * happen quickly (as the WAIT is only 200 cycles).  If
+		 * we fail, just warn and continue.
+		 */
+		timeout = jiffies + msecs_to_jiffies(100);
+		do {
+			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+			if ((idle & gpu->idle_mask) == gpu->idle_mask)
+				break;
+
+			if (time_is_before_jiffies(timeout)) {
+				dev_warn(gpu->dev,
+					 "timed out waiting for idle: idle=0x%x\n",
+					 idle);
+				break;
+			}
+
+			udelay(5);
+		} while (1);
+	}
+
+	return etnaviv_gpu_clk_disable(gpu);
+}
+
+#ifdef CONFIG_PM
+static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
+{
+	u32 clock;
+	int ret;
+
+	ret = mutex_lock_killable(&gpu->lock);
+	if (ret)
+		return ret;
+
+	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
+
+	etnaviv_gpu_load_clock(gpu, clock);
+	etnaviv_gpu_hw_init(gpu);
+
+	gpu->switch_context = true;
+
+	mutex_unlock(&gpu->lock);
+
+	return 0;
+}
+#endif
+
+static int etnaviv_gpu_bind(struct device *dev, struct device *master,
+	void *data)
+{
+	struct drm_device *drm = data;
+	struct etnaviv_drm_private *priv = drm->dev_private;
+	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+	int ret;
+
+#ifdef CONFIG_PM
+	ret = pm_runtime_get_sync(gpu->dev);
+#else
+	ret = etnaviv_gpu_clk_enable(gpu);
+#endif
+	if (ret < 0)
+		return ret;
+
+	gpu->drm = drm;
+	gpu->fence_context = fence_context_alloc(1);
+	spin_lock_init(&gpu->fence_spinlock);
+
+	INIT_LIST_HEAD(&gpu->active_cmd_list);
+	INIT_WORK(&gpu->retire_work, retire_worker);
+	INIT_WORK(&gpu->recover_work, recover_worker);
+	init_waitqueue_head(&gpu->fence_event);
+
+	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
+			(unsigned long)gpu);
+
+	priv->gpu[priv->num_gpus++] = gpu;
+
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
+
+	return 0;
+}
+
+static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
+	void *data)
+{
+	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+
+	DBG("%s", dev_name(gpu->dev));
+
+	hangcheck_disable(gpu);
+
+#ifdef CONFIG_PM
+	pm_runtime_get_sync(gpu->dev);
+	pm_runtime_put_sync_suspend(gpu->dev);
+#else
+	etnaviv_gpu_hw_suspend(gpu);
+#endif
+
+	if (gpu->buffer) {
+		etnaviv_gpu_cmdbuf_free(gpu->buffer);
+		gpu->buffer = NULL;
+	}
+
+	if (gpu->mmu) {
+		etnaviv_iommu_destroy(gpu->mmu);
+		gpu->mmu = NULL;
+	}
+
+	gpu->drm = NULL;
+}
+
+static const struct component_ops gpu_ops = {
+	.bind = etnaviv_gpu_bind,
+	.unbind = etnaviv_gpu_unbind,
+};
+
+static const struct of_device_id etnaviv_gpu_match[] = {
+	{
+		.compatible = "vivante,gc"
+	},
+	{ /* sentinel */ }
+};
+
+static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct etnaviv_gpu *gpu;
+	int err = 0;
+
+	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
+	if (!gpu)
+		return -ENOMEM;
+
+	gpu->dev = &pdev->dev;
+	mutex_init(&gpu->lock);
+
+	/*
+	 * Set the GPU base address to the start of physical memory.  This
+	 * ensures that with up to 2GB of RAM the v1 MMU can address all
+	 * of it.  This is important as command buffers may be allocated
+	 * outside of this limit.
+	 */
+	gpu->memory_base = PHYS_OFFSET;
+
+	/* Map registers: */
+	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
+	if (IS_ERR(gpu->mmio))
+		return PTR_ERR(gpu->mmio);
+
+	/* Get Interrupt: */
+	gpu->irq = platform_get_irq(pdev, 0);
+	if (gpu->irq < 0) {
+		err = gpu->irq;
+		dev_err(dev, "failed to get irq: %d\n", err);
+		goto fail;
+	}
+
+	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
+			       dev_name(gpu->dev), gpu);
+	if (err) {
+		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
+		goto fail;
+	}
+
+	/* Get Clocks: */
+	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
+	DBG("clk_bus: %p", gpu->clk_bus);
+	if (IS_ERR(gpu->clk_bus))
+		gpu->clk_bus = NULL;
+
+	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
+	DBG("clk_core: %p", gpu->clk_core);
+	if (IS_ERR(gpu->clk_core))
+		gpu->clk_core = NULL;
+
+	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
+	DBG("clk_shader: %p", gpu->clk_shader);
+	if (IS_ERR(gpu->clk_shader))
+		gpu->clk_shader = NULL;
+
+	/* TODO: figure out max mapped size */
+	dev_set_drvdata(dev, gpu);
+
+	/*
+	 * We treat the device as initially suspended.  The runtime PM
+	 * autosuspend delay is rather arbitrary: no measurements have
+	 * yet been performed to determine an appropriate value.
+	 */
+	pm_runtime_use_autosuspend(gpu->dev);
+	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
+	pm_runtime_enable(gpu->dev);
+
+	err = component_add(&pdev->dev, &gpu_ops);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register component: %d\n", err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	return err;
+}
+
+static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &gpu_ops);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int etnaviv_gpu_rpm_suspend(struct device *dev)
+{
+	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+	u32 idle, mask;
+
+	/* If we have outstanding fences, we're not idle */
+	if (gpu->completed_fence != gpu->active_fence)
+		return -EBUSY;
+
+	/* Check whether the hardware (except FE) is idle */
+	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
+	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
+	if (idle != mask)
+		return -EBUSY;
+
+	return etnaviv_gpu_hw_suspend(gpu);
+}
+
+static int etnaviv_gpu_rpm_resume(struct device *dev)
+{
+	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+	int ret;
+
+	ret = etnaviv_gpu_clk_enable(gpu);
+	if (ret)
+		return ret;
+
+	/* Re-initialise the basic hardware state */
+	if (gpu->drm && gpu->buffer) {
+		ret = etnaviv_gpu_hw_resume(gpu);
+		if (ret) {
+			etnaviv_gpu_clk_disable(gpu);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
+	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
+			   NULL)
+};
+
+struct platform_driver etnaviv_gpu_driver = {
+	.driver = {
+		.name = "etnaviv-gpu",
+		.owner = THIS_MODULE,
+		.pm = &etnaviv_gpu_pm_ops,
+		.of_match_table = etnaviv_gpu_match,
+	},
+	.probe = etnaviv_gpu_platform_probe,
+	.remove = etnaviv_gpu_platform_remove,
+	.id_table = gpu_ids,
+};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
new file mode 100644
index 0000000..c75d503
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_GPU_H__
+#define __ETNAVIV_GPU_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "etnaviv_drv.h"
+
+struct etnaviv_gem_submit;
+
+struct etnaviv_chip_identity {
+	/* Chip model. */
+	u32 model;
+
+	/* Revision value. */
+	u32 revision;
+
+	/* Supported feature fields. */
+	u32 features;
+
+	/* Supported minor feature fields. */
+	u32 minor_features0;
+
+	/* Supported minor feature 1 fields. */
+	u32 minor_features1;
+
+	/* Supported minor feature 2 fields. */
+	u32 minor_features2;
+
+	/* Supported minor feature 3 fields. */
+	u32 minor_features3;
+
+	/* Number of streams supported. */
+	u32 stream_count;
+
+	/* Total number of temporary registers per thread. */
+	u32 register_max;
+
+	/* Maximum number of threads. */
+	u32 thread_count;
+
+	/* Number of shader cores. */
+	u32 shader_core_count;
+
+	/* Size of the vertex cache. */
+	u32 vertex_cache_size;
+
+	/* Number of entries in the vertex output buffer. */
+	u32 vertex_output_buffer_size;
+
+	/* Number of pixel pipes. */
+	u32 pixel_pipes;
+
+	/* Number of instructions. */
+	u32 instruction_count;
+
+	/* Number of constants. */
+	u32 num_constants;
+
+	/* Buffer size */
+	u32 buffer_size;
+};
+
+struct etnaviv_event {
+	bool used;
+	struct fence *fence;
+};
+
+struct etnaviv_cmdbuf;
+
+struct etnaviv_gpu {
+	struct drm_device *drm;
+	struct device *dev;
+	struct mutex lock;
+	struct etnaviv_chip_identity identity;
+	struct etnaviv_file_private *lastctx;
+	bool switch_context;
+
+	/* 'ring'-buffer: */
+	struct etnaviv_cmdbuf *buffer;
+
+	/* bus base address of memory  */
+	u32 memory_base;
+
+	/* event management: */
+	struct etnaviv_event event[30];
+	struct completion event_free;
+	spinlock_t event_spinlock;
+
+	/* list of currently in-flight command buffers */
+	struct list_head active_cmd_list;
+
+	u32 idle_mask;
+
+	/* Fencing support */
+	u32 next_fence;
+	u32 active_fence;
+	u32 completed_fence;
+	u32 retired_fence;
+	wait_queue_head_t fence_event;
+	unsigned int fence_context;
+	spinlock_t fence_spinlock;
+
+	/* worker for handling active-list retiring: */
+	struct work_struct retire_work;
+
+	void __iomem *mmio;
+	int irq;
+
+	struct etnaviv_iommu *mmu;
+
+	/* Power Control: */
+	struct clk *clk_bus;
+	struct clk *clk_core;
+	struct clk *clk_shader;
+
+	/* Hang Detection: */
+#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
+#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	u32 hangcheck_fence;
+	u32 hangcheck_dma_addr;
+	struct work_struct recover_work;
+};
+
+struct etnaviv_cmdbuf {
+	/* device this cmdbuf is allocated for */
+	struct etnaviv_gpu *gpu;
+	/* user context key, must be unique between all active users */
+	struct etnaviv_file_private *ctx;
+	/* cmdbuf properties */
+	void *vaddr;
+	dma_addr_t paddr;
+	u32 size;
+	u32 user_size;
+	/* fence after which this buffer is to be disposed */
+	struct fence *fence;
+	/* target exec state */
+	u32 exec_state;
+	/* per GPU in-flight list */
+	struct list_head node;
+	/* BOs attached to this command buffer */
+	unsigned int nr_bos;
+	struct etnaviv_gem_object *bo[0];
+};
+
+static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
+{
+	etnaviv_writel(data, gpu->mmio + reg);
+}
+
+static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
+{
+	return etnaviv_readl(gpu->mmio + reg);
+}
+
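+/*
+ * Fence seqnos are compared with wrap-safe helpers: a fence is
+ * "completed" once the GPU has signalled it, and "retired" once the
+ * retire worker has processed the corresponding command buffer.
+ */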
+static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
+{
+	return fence_after_eq(gpu->completed_fence, fence);
+}
+
+static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
+{
+	return fence_after_eq(gpu->retired_fence, fence);
+}
+
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
+
+int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
+
+#ifdef CONFIG_DEBUG_FS
+int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
+#endif
+
+int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
+	unsigned int context, bool exclusive);
+
+void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
+int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
+	u32 fence, struct timespec *timeout);
+int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
+	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
+int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
+	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
+struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
+					      u32 size, size_t nr_bos);
+void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
+int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
+void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
+
+extern struct platform_driver etnaviv_gpu_driver;
+
+#endif /* __ETNAVIV_GPU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
new file mode 100644
index 0000000..522cfd4
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_iommu.h"
+#include "state_hi.xml.h"
+
+#define PT_SIZE		SZ_2M
+#define PT_ENTRIES	(PT_SIZE / sizeof(u32))
+
+#define GPU_MEM_START	0x80000000
+
+struct etnaviv_iommu_domain_pgtable {
+	u32 *pgtable;
+	dma_addr_t paddr;
+};
+
+struct etnaviv_iommu_domain {
+	struct iommu_domain domain;
+	struct device *dev;
+	void *bad_page_cpu;
+	dma_addr_t bad_page_dma;
+	struct etnaviv_iommu_domain_pgtable pgtable;
+	spinlock_t map_lock;
+};
+
+static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
+{
+	return container_of(domain, struct etnaviv_iommu_domain, domain);
+}
+
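+/*
+ * The v1 MMU uses a single flat page table of 32-bit entries, one per
+ * 4K page of the GPU address window starting at GPU_MEM_START;
+ * unpopulated entries are pointed at a poisoned "bad page" to catch
+ * stray accesses.
+ */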
+static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
+			 size_t size)
+{
+	pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
+	if (!pgtable->pgtable)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
+			 size_t size)
+{
+	dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
+}
+
+static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
+			   unsigned long iova)
+{
+	/* calculate index into page table */
+	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
+	phys_addr_t paddr;
+
+	paddr = pgtable->pgtable[index];
+
+	return paddr;
+}
+
+static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
+			  unsigned long iova, phys_addr_t paddr)
+{
+	/* calculate index into page table */
+	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
+
+	pgtable->pgtable[index] = paddr;
+}
+
+static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
+{
+	u32 *p;
+	int ret, i;
+
+	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+						  SZ_4K,
+						  &etnaviv_domain->bad_page_dma,
+						  GFP_KERNEL);
+	if (!etnaviv_domain->bad_page_cpu)
+		return -ENOMEM;
+
+	p = etnaviv_domain->bad_page_cpu;
+	for (i = 0; i < SZ_4K / 4; i++)
+		*p++ = 0xdead55aa;
+
+	ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
+	if (ret < 0) {
+		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+				  etnaviv_domain->bad_page_cpu,
+				  etnaviv_domain->bad_page_dma);
+		return ret;
+	}
+
+	for (i = 0; i < PT_ENTRIES; i++)
+		etnaviv_domain->pgtable.pgtable[i] =
+			etnaviv_domain->bad_page_dma;
+
+	spin_lock_init(&etnaviv_domain->map_lock);
+
+	return 0;
+}
+
+static void etnaviv_domain_free(struct iommu_domain *domain)
+{
+	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+	pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
+
+	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			  etnaviv_domain->bad_page_cpu,
+			  etnaviv_domain->bad_page_dma);
+
+	kfree(etnaviv_domain);
+}
+
+static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
+	   phys_addr_t paddr, size_t size, int prot)
+{
+	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+	if (size != SZ_4K)
+		return -EINVAL;
+
+	spin_lock(&etnaviv_domain->map_lock);
+	pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
+	spin_unlock(&etnaviv_domain->map_lock);
+
+	return 0;
+}
+
+static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
+	unsigned long iova, size_t size)
+{
+	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+	if (size != SZ_4K)
+		return -EINVAL;
+
+	spin_lock(&etnaviv_domain->map_lock);
+	pgtable_write(&etnaviv_domain->pgtable, iova,
+		      etnaviv_domain->bad_page_dma);
+	spin_unlock(&etnaviv_domain->map_lock);
+
+	return SZ_4K;
+}
+
+static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
+	dma_addr_t iova)
+{
+	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+	return pgtable_read(&etnaviv_domain->pgtable, iova);
+}
+
+static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
+{
+	return PT_SIZE;
+}
+
+static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
+{
+	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
+}
+
+static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+	.ops = {
+		.domain_free = etnaviv_domain_free,
+		.map = etnaviv_iommuv1_map,
+		.unmap = etnaviv_iommuv1_unmap,
+		.iova_to_phys = etnaviv_iommu_iova_to_phys,
+		.pgsize_bitmap = SZ_4K,
+	},
+	.dump_size = etnaviv_iommuv1_dump_size,
+	.dump = etnaviv_iommuv1_dump,
+};
+
+void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
+	struct iommu_domain *domain)
+{
+	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	u32 pgtable;
+
+	/* set page table address in MC */
+	pgtable = (u32)etnaviv_domain->pgtable.paddr;
+
+	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
+	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
+	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
+	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
+	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
+}
+
+struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_iommu_domain *etnaviv_domain;
+	int ret;
+
+	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
+	if (!etnaviv_domain)
+		return NULL;
+
+	etnaviv_domain->dev = gpu->dev;
+
+	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
+	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+	etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
+	etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
+
+	ret = __etnaviv_iommu_init(etnaviv_domain);
+	if (ret)
+		goto out_free;
+
+	return &etnaviv_domain->domain;
+
+out_free:
+	kfree(etnaviv_domain);
+	return NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
new file mode 100644
index 0000000..cf45503
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_IOMMU_H__
+#define __ETNAVIV_IOMMU_H__
+
+#include <linux/iommu.h>
+struct etnaviv_gpu;
+
+struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
+	struct iommu_domain *domain);
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+
+#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
new file mode 100644
index 0000000..fbb4aed
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_iommu.h"
+#include "state_hi.xml.h"
+
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
+{
+	/* TODO */
+	return NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
new file mode 100644
index 0000000..603ea41
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_IOMMU_V2_H__
+#define __ETNAVIV_IOMMU_V2_H__
+
+#include <linux/iommu.h>
+struct etnaviv_gpu;
+
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+
+#endif /* __ETNAVIV_IOMMU_V2_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
new file mode 100644
index 0000000..6743bc6
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
+		unsigned long iova, int flags, void *arg)
+{
+	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+	return 0;
+}
+
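+/*
+ * Map a scatterlist into the GPU address space one entry at a time,
+ * unwinding any partially completed mapping on failure.
+ */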
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+		struct sg_table *sgt, unsigned len, int prot)
+{
+	struct iommu_domain *domain = iommu->domain;
+	struct scatterlist *sg;
+	unsigned int da = iova;
+	unsigned int i, j;
+	int ret;
+
+	if (!domain || !sgt)
+		return -EINVAL;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		u32 pa = sg_dma_address(sg) - sg->offset;
+		size_t bytes = sg_dma_len(sg) + sg->offset;
+
+		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+
+		ret = iommu_map(domain, da, pa, bytes, prot);
+		if (ret)
+			goto fail;
+
+		da += bytes;
+	}
+
+	return 0;
+
+fail:
+	da = iova;
+
+	for_each_sg(sgt->sgl, sg, i, j) {
+		size_t bytes = sg_dma_len(sg) + sg->offset;
+
+		iommu_unmap(domain, da, bytes);
+		da += bytes;
+	}
+	return ret;
+}
+
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+		struct sg_table *sgt, unsigned len)
+{
+	struct iommu_domain *domain = iommu->domain;
+	struct scatterlist *sg;
+	unsigned int da = iova;
+	int i;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes = sg_dma_len(sg) + sg->offset;
+		size_t unmapped;
+
+		unmapped = iommu_unmap(domain, da, bytes);
+		if (unmapped < bytes)
+			return unmapped;
+
+		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+
+		BUG_ON(!PAGE_ALIGNED(bytes));
+
+		da += bytes;
+	}
+
+	return 0;
+}
+
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
+	struct etnaviv_vram_mapping *mapping)
+{
+	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
+			    etnaviv_obj->sgt, etnaviv_obj->base.size);
+	drm_mm_remove_node(&mapping->vram_node);
+}
+
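+/*
+ * Map a GEM object into the GPU address space.  On the v1 MMU, a
+ * contiguous buffer within the first 2GB above memory_base can bypass
+ * the page table entirely; otherwise a node is carved out of the
+ * drm_mm range, evicting unused mappings via the drm_mm scan API when
+ * the address space is fragmented.
+ */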
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+	struct etnaviv_vram_mapping *mapping)
+{
+	struct etnaviv_vram_mapping *free = NULL;
+	struct sg_table *sgt = etnaviv_obj->sgt;
+	struct drm_mm_node *node;
+	int ret;
+
+	lockdep_assert_held(&etnaviv_obj->lock);
+
+	mutex_lock(&mmu->lock);
+
+	/* v1 MMU can optimize single entry (contiguous) scatterlists */
+	if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
+		u32 iova;
+
+		iova = sg_dma_address(sgt->sgl) - memory_base;
+		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
+			mapping->iova = iova;
+			list_add_tail(&mapping->mmu_node, &mmu->mappings);
+			mutex_unlock(&mmu->lock);
+			return 0;
+		}
+	}
+
+	node = &mapping->vram_node;
+	while (1) {
+		struct etnaviv_vram_mapping *m, *n;
+		struct list_head list;
+		bool found;
+
+		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
+			etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
+			DRM_MM_SEARCH_DEFAULT);
+
+		if (ret != -ENOSPC)
+			break;
+
+		/*
+		 * If we did not search from the start of the MMU region,
+		 * try again in case there are free slots.
+		 */
+		if (mmu->last_iova) {
+			mmu->last_iova = 0;
+			mmu->need_flush = true;
+			continue;
+		}
+
+		/* Try to retire some entries */
+		drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
+
+		found = false;
+		INIT_LIST_HEAD(&list);
+		list_for_each_entry(free, &mmu->mappings, mmu_node) {
+			/* If this vram node has not been used, skip it. */
+			if (!free->vram_node.mm)
+				continue;
+
+			/*
+			 * If the iova is pinned, then it's in-use,
+			 * so we must keep its mapping.
+			 */
+			if (free->use)
+				continue;
+
+			list_add(&free->scan_node, &list);
+			if (drm_mm_scan_add_block(&free->vram_node)) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			/* Nothing found, clean up and fail */
+			list_for_each_entry_safe(m, n, &list, scan_node)
+				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
+			break;
+		}
+
+		/*
+		 * drm_mm does not allow any other operations while
+		 * scanning, so we have to remove all blocks first.
+		 * If drm_mm_scan_remove_block() returns false, we
+		 * can leave the block pinned.
+		 */
+		list_for_each_entry_safe(m, n, &list, scan_node)
+			if (!drm_mm_scan_remove_block(&m->vram_node))
+				list_del_init(&m->scan_node);
+
+		/*
+		 * Unmap the blocks which need to be reaped from the MMU.
+		 * Clear the mmu pointer to prevent the get_iova path from
+		 * finding this mapping.
+		 */
+		list_for_each_entry_safe(m, n, &list, scan_node) {
+			etnaviv_iommu_remove_mapping(mmu, m);
+			m->mmu = NULL;
+			list_del_init(&m->mmu_node);
+			list_del_init(&m->scan_node);
+		}
+
+		/*
+		 * We removed enough mappings so that the new allocation will
+		 * succeed.  Ensure that the MMU will be flushed before the
+		 * associated commit requesting this mapping, and retry the
+		 * allocation one more time.
+		 */
+		mmu->need_flush = true;
+	}
+
+	if (ret < 0) {
+		mutex_unlock(&mmu->lock);
+		return ret;
+	}
+
+	mmu->last_iova = node->start + etnaviv_obj->base.size;
+	mapping->iova = node->start;
+	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
+				IOMMU_READ | IOMMU_WRITE);
+
+	if (ret < 0) {
+		drm_mm_remove_node(node);
+		mutex_unlock(&mmu->lock);
+		return ret;
+	}
+
+	list_add_tail(&mapping->mmu_node, &mmu->mappings);
+	mutex_unlock(&mmu->lock);
+
+	return ret;
+}
+
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_vram_mapping *mapping)
+{
+	WARN_ON(mapping->use);
+
+	mutex_lock(&mmu->lock);
+
+	/* If the vram node is on the mm, unmap and remove the node */
+	if (mapping->vram_node.mm == &mmu->mm)
+		etnaviv_iommu_remove_mapping(mmu, mapping);
+
+	list_del(&mapping->mmu_node);
+	mutex_unlock(&mmu->lock);
+}
+
+void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+{
+	drm_mm_takedown(&mmu->mm);
+	iommu_domain_free(mmu->domain);
+	kfree(mmu);
+}
+
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
+	struct iommu_domain *domain, enum etnaviv_iommu_version version)
+{
+	struct etnaviv_iommu *mmu;
+
+	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+	if (!mmu)
+		return ERR_PTR(-ENOMEM);
+
+	mmu->domain = domain;
+	mmu->gpu = gpu;
+	mmu->version = version;
+	mutex_init(&mmu->lock);
+	INIT_LIST_HEAD(&mmu->mappings);
+
+	drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
+		    domain->geometry.aperture_end -
+		      domain->geometry.aperture_start + 1);
+
+	iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
+
+	return mmu;
+}
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+{
+	struct etnaviv_iommu_ops *ops;
+
+	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+
+	return ops->dump_size(iommu->domain);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+{
+	struct etnaviv_iommu_ops *ops;
+
+	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+
+	ops->dump(iommu->domain, buf);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
new file mode 100644
index 0000000..fff215a4
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_MMU_H__
+#define __ETNAVIV_MMU_H__
+
+#include <linux/iommu.h>
+
+enum etnaviv_iommu_version {
+	ETNAVIV_IOMMU_V1 = 0,
+	ETNAVIV_IOMMU_V2,
+};
+
+struct etnaviv_gpu;
+struct etnaviv_vram_mapping;
+
+struct etnaviv_iommu_ops {
+	struct iommu_ops ops;
+	size_t (*dump_size)(struct iommu_domain *);
+	void (*dump)(struct iommu_domain *, void *);
+};
+
+struct etnaviv_iommu {
+	struct etnaviv_gpu *gpu;
+	struct iommu_domain *domain;
+
+	enum etnaviv_iommu_version version;
+
+	/* memory manager for GPU address area */
+	struct mutex lock;
+	struct list_head mappings;
+	struct drm_mm mm;
+	u32 last_iova;
+	bool need_flush;
+};
+
+struct etnaviv_gem_object;
+
+int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
+	int cnt);
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+	struct sg_table *sgt, unsigned len, int prot);
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+	struct sg_table *sgt, unsigned len);
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+	struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
+
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
+	struct iommu_domain *domain, enum etnaviv_iommu_version version);
+
+#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
new file mode 100644
index 0000000..3682183
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -0,0 +1,351 @@
+#ifndef STATE_XML
+#define STATE_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state.xml    (  18882 bytes, from 2015-03-25 11:42:32)
+- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
+- state_hi.xml (  23420 bytes, from 2015-03-25 11:47:21)
+- state_2d.xml (  51549 bytes, from 2015-03-25 11:25:06)
+- state_3d.xml (  54600 bytes, from 2015-03-25 11:25:19)
+- state_vg.xml (   5973 bytes, from 2015-03-25 11:26:01)
+
+Copyright (C) 2015
+*/
+
+
+#define VARYING_COMPONENT_USE_UNUSED				0x00000000
+#define VARYING_COMPONENT_USE_USED				0x00000001
+#define VARYING_COMPONENT_USE_POINTCOORD_X			0x00000002
+#define VARYING_COMPONENT_USE_POINTCOORD_Y			0x00000003
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK		0x000000ff
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT		0
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x)		(((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)
+#define VIVS_FE							0x00000000
+
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0)		       (0x00000600 + 0x4*(i0))
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE			0x00000004
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN			0x00000010
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK		0x0000000f
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT		0
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE			0x00000000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE	0x00000001
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT		0x00000002
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT	0x00000003
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT			0x00000004
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT		0x00000005
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT		0x00000008
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT		0x00000009
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED		0x0000000b
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2	0x0000000c
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2	0x0000000d
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK		0x00000030
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT		4
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE		0x00000080
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK		0x00000700
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT		8
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK			0x00003000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT		12
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK		0x0000c000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT		14
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF		0x00000000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON		0x00008000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK		0x00ff0000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT		16
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK			0xff000000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT		24
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x)			(((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK)
+
+#define VIVS_FE_CMD_STREAM_BASE_ADDR				0x00000640
+
+#define VIVS_FE_INDEX_STREAM_BASE_ADDR				0x00000644
+
+#define VIVS_FE_INDEX_STREAM_CONTROL				0x00000648
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK			0x00000003
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT		0
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR		0x00000000
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT	0x00000001
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT		0x00000002
+
+#define VIVS_FE_VERTEX_STREAM_BASE_ADDR				0x0000064c
+
+#define VIVS_FE_VERTEX_STREAM_CONTROL				0x00000650
+
+#define VIVS_FE_COMMAND_ADDRESS					0x00000654
+
+#define VIVS_FE_COMMAND_CONTROL					0x00000658
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK			0x0000ffff
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT			0
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x)			(((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK)
+#define VIVS_FE_COMMAND_CONTROL_ENABLE				0x00010000
+
+#define VIVS_FE_DMA_STATUS					0x0000065c
+
+#define VIVS_FE_DMA_DEBUG_STATE					0x00000660
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK			0x0000001f
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT		0
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE			0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC			0x00000001
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0			0x00000002
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0			0x00000003
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1			0x00000004
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1			0x00000005
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR			0x00000006
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD			0x00000007
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL		0x00000008
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL		0x00000009
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA		0x0000000a
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX		0x0000000b
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW			0x0000000c
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0		0x0000000d
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1		0x0000000e
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0		0x0000000f
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1		0x00000010
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO		0x00000011
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT			0x00000012
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK			0x00000013
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END			0x00000014
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL			0x00000015
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK		0x00000300
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT		8
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE		0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START		0x00000100
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ		0x00000200
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END		0x00000300
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK		0x00000c00
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT		10
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE		0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID	0x00000400
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID		0x00000800
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK		0x00003000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT		12
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE		0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX		0x00001000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL		0x00002000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK			0x0000c000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT		14
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE			0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR			0x00004000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC		0x00008000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK		0x00030000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT		16
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE		0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE		0x00010000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS		0x00020000
+
+#define VIVS_FE_DMA_ADDRESS					0x00000664
+
+#define VIVS_FE_DMA_LOW						0x00000668
+
+#define VIVS_FE_DMA_HIGH					0x0000066c
+
+#define VIVS_FE_AUTO_FLUSH					0x00000670
+
+#define VIVS_FE_UNK00678					0x00000678
+
+#define VIVS_FE_UNK0067C					0x0000067c
+
+#define VIVS_FE_VERTEX_STREAMS(i0)			       (0x00000000 + 0x4*(i0))
+#define VIVS_FE_VERTEX_STREAMS__ESIZE				0x00000004
+#define VIVS_FE_VERTEX_STREAMS__LEN				0x00000008
+
+#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0)		       (0x00000680 + 0x4*(i0))
+
+#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0)		       (0x000006a0 + 0x4*(i0))
+
+#define VIVS_FE_UNK00700(i0)				       (0x00000700 + 0x4*(i0))
+#define VIVS_FE_UNK00700__ESIZE					0x00000004
+#define VIVS_FE_UNK00700__LEN					0x00000010
+
+#define VIVS_FE_UNK00740(i0)				       (0x00000740 + 0x4*(i0))
+#define VIVS_FE_UNK00740__ESIZE					0x00000004
+#define VIVS_FE_UNK00740__LEN					0x00000010
+
+#define VIVS_FE_UNK00780(i0)				       (0x00000780 + 0x4*(i0))
+#define VIVS_FE_UNK00780__ESIZE					0x00000004
+#define VIVS_FE_UNK00780__LEN					0x00000010
+
+#define VIVS_GL							0x00000000
+
+#define VIVS_GL_PIPE_SELECT					0x00003800
+#define VIVS_GL_PIPE_SELECT_PIPE__MASK				0x00000001
+#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT				0
+#define VIVS_GL_PIPE_SELECT_PIPE(x)				(((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK)
+
+#define VIVS_GL_EVENT						0x00003804
+#define VIVS_GL_EVENT_EVENT_ID__MASK				0x0000001f
+#define VIVS_GL_EVENT_EVENT_ID__SHIFT				0
+#define VIVS_GL_EVENT_EVENT_ID(x)				(((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
+#define VIVS_GL_EVENT_FROM_FE					0x00000020
+#define VIVS_GL_EVENT_FROM_PE					0x00000040
+#define VIVS_GL_EVENT_SOURCE__MASK				0x00001f00
+#define VIVS_GL_EVENT_SOURCE__SHIFT				8
+#define VIVS_GL_EVENT_SOURCE(x)					(((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)
+
+#define VIVS_GL_SEMAPHORE_TOKEN					0x00003808
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK			0x0000001f
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT			0
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x)				(((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK)
+#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK			0x00001f00
+#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT			8
+#define VIVS_GL_SEMAPHORE_TOKEN_TO(x)				(((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)
+
+#define VIVS_GL_FLUSH_CACHE					0x0000380c
+#define VIVS_GL_FLUSH_CACHE_DEPTH				0x00000001
+#define VIVS_GL_FLUSH_CACHE_COLOR				0x00000002
+#define VIVS_GL_FLUSH_CACHE_TEXTURE				0x00000004
+#define VIVS_GL_FLUSH_CACHE_PE2D				0x00000008
+#define VIVS_GL_FLUSH_CACHE_TEXTUREVS				0x00000010
+#define VIVS_GL_FLUSH_CACHE_SHADER_L1				0x00000020
+#define VIVS_GL_FLUSH_CACHE_SHADER_L2				0x00000040
+
+#define VIVS_GL_FLUSH_MMU					0x00003810
+#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU				0x00000001
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1				0x00000002
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK2				0x00000004
+#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU				0x00000008
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4				0x00000010
+
+#define VIVS_GL_VERTEX_ELEMENT_CONFIG				0x00003814
+
+#define VIVS_GL_MULTI_SAMPLE_CONFIG				0x00003818
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK		0x00000003
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT		0
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE		0x00000000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X		0x00000001
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X		0x00000002
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK		0x00000008
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK		0x000000f0
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT		4
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x)		(((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK		0x00000100
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK			0x00007000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT		12
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x)			(((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK			0x00008000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK			0x00030000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT		16
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x)			(((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK			0x00080000
+
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS			0x0000381c
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK		0x000000ff
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT		0
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x)			(((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)
+
+#define VIVS_GL_VARYING_NUM_COMPONENTS				0x00003820
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK		0x00000007
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT		0
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK		0x00000070
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT		4
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK		0x00000700
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT		8
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK		0x00007000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT		12
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK		0x00070000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT		16
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK		0x00700000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT		20
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK		0x07000000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT		24
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK		0x70000000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT		28
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x)			(((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK)
+
+#define VIVS_GL_VARYING_COMPONENT_USE(i0)		       (0x00003828 + 0x4*(i0))
+#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE			0x00000004
+#define VIVS_GL_VARYING_COMPONENT_USE__LEN			0x00000002
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK		0x00000003
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT		0
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK		0x0000000c
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT		2
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK		0x00000030
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT		4
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK		0x000000c0
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT		6
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK		0x00000300
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT		8
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK		0x00000c00
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT		10
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK		0x00003000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT		12
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK		0x0000c000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT		14
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK		0x00030000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT		16
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK		0x000c0000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT		18
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK		0x00300000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT		20
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK		0x00c00000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT		22
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK		0x03000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT		24
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK		0x0c000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT		26
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK		0x30000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT		28
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK		0xc0000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT		30
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x)			(((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)
+
+#define VIVS_GL_UNK03834					0x00003834
+
+#define VIVS_GL_UNK03838					0x00003838
+
+#define VIVS_GL_API_MODE					0x0000384c
+#define VIVS_GL_API_MODE_OPENGL					0x00000000
+#define VIVS_GL_API_MODE_OPENVG					0x00000001
+#define VIVS_GL_API_MODE_OPENCL					0x00000002
+
+#define VIVS_GL_CONTEXT_POINTER					0x00003850
+
+#define VIVS_GL_UNK03A00					0x00003a00
+
+#define VIVS_GL_STALL_TOKEN					0x00003c00
+#define VIVS_GL_STALL_TOKEN_FROM__MASK				0x0000001f
+#define VIVS_GL_STALL_TOKEN_FROM__SHIFT				0
+#define VIVS_GL_STALL_TOKEN_FROM(x)				(((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK)
+#define VIVS_GL_STALL_TOKEN_TO__MASK				0x00001f00
+#define VIVS_GL_STALL_TOKEN_TO__SHIFT				8
+#define VIVS_GL_STALL_TOKEN_TO(x)				(((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK)
+#define VIVS_GL_STALL_TOKEN_FLIP0				0x40000000
+#define VIVS_GL_STALL_TOKEN_FLIP1				0x80000000
+
+#define VIVS_DUMMY						0x00000000
+
+#define VIVS_DUMMY_DUMMY					0x0003fffc
+
+
+#endif /* STATE_XML */
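
[Editor's note: register definitions above that take an (i0) argument describe small register arrays; the generated __ESIZE constant is the byte stride between elements and __LEN the element count. A minimal sketch of walking one such array, assuming <linux/io.h> and an ioremap()ed register base — the helper is illustrative, not part of this patch:

	/* Program every GL_VARYING_COMPONENT_USE word from a shadow copy. */
	static void example_write_varying_use(void __iomem *mmio, const u32 *use)
	{
		unsigned int i;

		for (i = 0; i < VIVS_GL_VARYING_COMPONENT_USE__LEN; i++)
			writel(use[i], mmio + VIVS_GL_VARYING_COMPONENT_USE(i));
	}
]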
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
new file mode 100644
index 0000000..0064f26
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -0,0 +1,407 @@
+#ifndef STATE_HI_XML
+#define STATE_HI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state_hi.xml (  23420 bytes, from 2015-03-25 11:47:21)
+- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
+
+Copyright (C) 2015
+*/
+
+
+#define MMU_EXCEPTION_SLAVE_NOT_PRESENT				0x00000001
+#define MMU_EXCEPTION_PAGE_NOT_PRESENT				0x00000002
+#define MMU_EXCEPTION_WRITE_VIOLATION				0x00000003
+#define VIVS_HI							0x00000000
+
+#define VIVS_HI_CLOCK_CONTROL					0x00000000
+#define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS				0x00000001
+#define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS				0x00000002
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK			0x000001fc
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT			2
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x)			(((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK)
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD			0x00000200
+#define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING		0x00000400
+#define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS		0x00000800
+#define VIVS_HI_CLOCK_CONTROL_SOFT_RESET			0x00001000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_3D				0x00010000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_2D				0x00020000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_VG				0x00040000
+#define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU			0x00080000
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK		0x00f00000
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT		20
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x)		(((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK)
+
+#define VIVS_HI_IDLE_STATE					0x00000004
+#define VIVS_HI_IDLE_STATE_FE					0x00000001
+#define VIVS_HI_IDLE_STATE_DE					0x00000002
+#define VIVS_HI_IDLE_STATE_PE					0x00000004
+#define VIVS_HI_IDLE_STATE_SH					0x00000008
+#define VIVS_HI_IDLE_STATE_PA					0x00000010
+#define VIVS_HI_IDLE_STATE_SE					0x00000020
+#define VIVS_HI_IDLE_STATE_RA					0x00000040
+#define VIVS_HI_IDLE_STATE_TX					0x00000080
+#define VIVS_HI_IDLE_STATE_VG					0x00000100
+#define VIVS_HI_IDLE_STATE_IM					0x00000200
+#define VIVS_HI_IDLE_STATE_FP					0x00000400
+#define VIVS_HI_IDLE_STATE_TS					0x00000800
+#define VIVS_HI_IDLE_STATE_AXI_LP				0x80000000
+
+#define VIVS_HI_AXI_CONFIG					0x00000008
+#define VIVS_HI_AXI_CONFIG_AWID__MASK				0x0000000f
+#define VIVS_HI_AXI_CONFIG_AWID__SHIFT				0
+#define VIVS_HI_AXI_CONFIG_AWID(x)				(((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK)
+#define VIVS_HI_AXI_CONFIG_ARID__MASK				0x000000f0
+#define VIVS_HI_AXI_CONFIG_ARID__SHIFT				4
+#define VIVS_HI_AXI_CONFIG_ARID(x)				(((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK)
+#define VIVS_HI_AXI_CONFIG_AWCACHE__MASK			0x00000f00
+#define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT			8
+#define VIVS_HI_AXI_CONFIG_AWCACHE(x)				(((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK)
+#define VIVS_HI_AXI_CONFIG_ARCACHE__MASK			0x0000f000
+#define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT			12
+#define VIVS_HI_AXI_CONFIG_ARCACHE(x)				(((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK)
+
+#define VIVS_HI_AXI_STATUS					0x0000000c
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK			0x0000000f
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT			0
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID(x)				(((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK)
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK			0x000000f0
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT			4
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID(x)				(((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK)
+#define VIVS_HI_AXI_STATUS_DET_WR_ERR				0x00000100
+#define VIVS_HI_AXI_STATUS_DET_RD_ERR				0x00000200
+
+#define VIVS_HI_INTR_ACKNOWLEDGE				0x00000010
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK			0x7fffffff
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT		0
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x)			(((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
+#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR			0x80000000
+
+#define VIVS_HI_INTR_ENBL					0x00000014
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK			0xffffffff
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT			0
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x)			(((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK)
+
+#define VIVS_HI_CHIP_IDENTITY					0x00000018
+#define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK			0xff000000
+#define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT			24
+#define VIVS_HI_CHIP_IDENTITY_FAMILY(x)				(((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK			0x00ff0000
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT			16
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT(x)			(((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK)
+#define VIVS_HI_CHIP_IDENTITY_REVISION__MASK			0x0000f000
+#define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT			12
+#define VIVS_HI_CHIP_IDENTITY_REVISION(x)			(((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
+
+#define VIVS_HI_CHIP_FEATURE					0x0000001c
+
+#define VIVS_HI_CHIP_MODEL					0x00000020
+
+#define VIVS_HI_CHIP_REV					0x00000024
+
+#define VIVS_HI_CHIP_DATE					0x00000028
+
+#define VIVS_HI_CHIP_TIME					0x0000002c
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_0				0x00000034
+
+#define VIVS_HI_CACHE_CONTROL					0x00000038
+
+#define VIVS_HI_MEMORY_COUNTER_RESET				0x0000003c
+
+#define VIVS_HI_PROFILE_READ_BYTES8				0x00000040
+
+#define VIVS_HI_PROFILE_WRITE_BYTES8				0x00000044
+
+#define VIVS_HI_CHIP_SPECS					0x00000048
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK			0x0000000f
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT			0
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x)			(((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK			0x000000f0
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT			4
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x)			(((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK			0x00000f00
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT			8
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x)			(((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK		0x0001f000
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT		12
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x)			(((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK		0x01f00000
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT		20
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x)			(((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK			0x0e000000
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT			25
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x)			(((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK	0xf0000000
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT	28
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x)		(((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
+
+#define VIVS_HI_PROFILE_WRITE_BURSTS				0x0000004c
+
+#define VIVS_HI_PROFILE_WRITE_REQUESTS				0x00000050
+
+#define VIVS_HI_PROFILE_READ_BURSTS				0x00000058
+
+#define VIVS_HI_PROFILE_READ_REQUESTS				0x0000005c
+
+#define VIVS_HI_PROFILE_READ_LASTS				0x00000060
+
+#define VIVS_HI_GP_OUT0						0x00000064
+
+#define VIVS_HI_GP_OUT1						0x00000068
+
+#define VIVS_HI_GP_OUT2						0x0000006c
+
+#define VIVS_HI_AXI_CONTROL					0x00000070
+#define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE			0x00000001
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_1				0x00000074
+
+#define VIVS_HI_PROFILE_TOTAL_CYCLES				0x00000078
+
+#define VIVS_HI_PROFILE_IDLE_CYCLES				0x0000007c
+
+#define VIVS_HI_CHIP_SPECS_2					0x00000080
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK			0x000000ff
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT			0
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x)			(((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK		0x0000ff00
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT		8
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x)		(((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK		0xffff0000
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT		16
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x)			(((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_2				0x00000084
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_3				0x00000088
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_4				0x00000094
+
+#define VIVS_PM							0x00000000
+
+#define VIVS_PM_POWER_CONTROLS					0x00000100
+#define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING	0x00000001
+#define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING	0x00000002
+#define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING	0x00000004
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK		0x000000f0
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT		4
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x)		(((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK)
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK		0xffff0000
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT		16
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x)		(((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK)
+
+#define VIVS_PM_MODULE_CONTROLS					0x00000104
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE	0x00000001
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE	0x00000002
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE	0x00000004
+
+#define VIVS_PM_MODULE_STATUS					0x00000108
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE		0x00000001
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE		0x00000002
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE		0x00000004
+
+#define VIVS_PM_PULSE_EATER					0x0000010c
+
+#define VIVS_MMUv2						0x00000000
+
+#define VIVS_MMUv2_SAFE_ADDRESS					0x00000180
+
+#define VIVS_MMUv2_CONFIGURATION				0x00000184
+#define VIVS_MMUv2_CONFIGURATION_MODE__MASK			0x00000001
+#define VIVS_MMUv2_CONFIGURATION_MODE__SHIFT			0
+#define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K			0x00000000
+#define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K			0x00000001
+#define VIVS_MMUv2_CONFIGURATION_MODE_MASK			0x00000008
+#define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK			0x00000010
+#define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT			4
+#define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH			0x00000010
+#define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK			0x00000080
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK			0x00000100
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK			0xfffffc00
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT			10
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x)			(((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK)
+
+#define VIVS_MMUv2_STATUS					0x00000188
+#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK			0x00000003
+#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT			0
+#define VIVS_MMUv2_STATUS_EXCEPTION0(x)				(((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK			0x00000030
+#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT			4
+#define VIVS_MMUv2_STATUS_EXCEPTION1(x)				(((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK			0x00000300
+#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT			8
+#define VIVS_MMUv2_STATUS_EXCEPTION2(x)				(((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK			0x00003000
+#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT			12
+#define VIVS_MMUv2_STATUS_EXCEPTION3(x)				(((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK)
+
+#define VIVS_MMUv2_CONTROL					0x0000018c
+#define VIVS_MMUv2_CONTROL_ENABLE				0x00000001
+
+#define VIVS_MMUv2_EXCEPTION_ADDR(i0)			       (0x00000190 + 0x4*(i0))
+#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE			0x00000004
+#define VIVS_MMUv2_EXCEPTION_ADDR__LEN				0x00000004
+
+#define VIVS_MC							0x00000000
+
+#define VIVS_MC_MMU_FE_PAGE_TABLE				0x00000400
+
+#define VIVS_MC_MMU_TX_PAGE_TABLE				0x00000404
+
+#define VIVS_MC_MMU_PE_PAGE_TABLE				0x00000408
+
+#define VIVS_MC_MMU_PEZ_PAGE_TABLE				0x0000040c
+
+#define VIVS_MC_MMU_RA_PAGE_TABLE				0x00000410
+
+#define VIVS_MC_DEBUG_MEMORY					0x00000414
+#define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320		0x00000008
+#define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS			0x00100000
+#define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS			0x00200000
+
+#define VIVS_MC_MEMORY_BASE_ADDR_RA				0x00000418
+
+#define VIVS_MC_MEMORY_BASE_ADDR_FE				0x0000041c
+
+#define VIVS_MC_MEMORY_BASE_ADDR_TX				0x00000420
+
+#define VIVS_MC_MEMORY_BASE_ADDR_PEZ				0x00000424
+
+#define VIVS_MC_MEMORY_BASE_ADDR_PE				0x00000428
+
+#define VIVS_MC_MEMORY_TIMING_CONTROL				0x0000042c
+
+#define VIVS_MC_MEMORY_FLUSH					0x00000430
+
+#define VIVS_MC_PROFILE_CYCLE_COUNTER				0x00000438
+
+#define VIVS_MC_DEBUG_READ0					0x0000043c
+
+#define VIVS_MC_DEBUG_READ1					0x00000440
+
+#define VIVS_MC_DEBUG_WRITE					0x00000444
+
+#define VIVS_MC_PROFILE_RA_READ					0x00000448
+
+#define VIVS_MC_PROFILE_TX_READ					0x0000044c
+
+#define VIVS_MC_PROFILE_FE_READ					0x00000450
+
+#define VIVS_MC_PROFILE_PE_READ					0x00000454
+
+#define VIVS_MC_PROFILE_DE_READ					0x00000458
+
+#define VIVS_MC_PROFILE_SH_READ					0x0000045c
+
+#define VIVS_MC_PROFILE_PA_READ					0x00000460
+
+#define VIVS_MC_PROFILE_SE_READ					0x00000464
+
+#define VIVS_MC_PROFILE_MC_READ					0x00000468
+
+#define VIVS_MC_PROFILE_HI_READ					0x0000046c
+
+#define VIVS_MC_PROFILE_CONFIG0					0x00000470
+#define VIVS_MC_PROFILE_CONFIG0_FE__MASK			0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT			0
+#define VIVS_MC_PROFILE_CONFIG0_FE_RESET			0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_DE__MASK			0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT			8
+#define VIVS_MC_PROFILE_CONFIG0_DE_RESET			0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_PE__MASK			0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT			16
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE	0x00000000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE	0x00010000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE	0x00020000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE	0x00030000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D		0x000b0000
+#define VIVS_MC_PROFILE_CONFIG0_PE_RESET			0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_SH__MASK			0x0f000000
+#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT			24
+#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES		0x04000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER		0x07000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER	0x08000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER		0x09000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER	0x0a000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER	0x0b000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER	0x0c000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER	0x0d000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER	0x0e000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RESET			0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG1					0x00000474
+#define VIVS_MC_PROFILE_CONFIG1_PA__MASK			0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT			0
+#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER		0x00000003
+#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER		0x00000004
+#define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER		0x00000005
+#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER	0x00000006
+#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER	0x00000007
+#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER		0x00000008
+#define VIVS_MC_PROFILE_CONFIG1_PA_RESET			0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_SE__MASK			0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT			8
+#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT	0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT		0x00000100
+#define VIVS_MC_PROFILE_CONFIG1_SE_RESET			0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_RA__MASK			0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT			16
+#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT		0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT		0x00010000
+#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z	0x00020000
+#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT	0x00030000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER	0x00090000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER	0x000a0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT		0x000b0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_RESET			0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_TX__MASK			0x0f000000
+#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT			24
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS	0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS	0x01000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS	0x02000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS	0x03000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN			0x04000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT		0x05000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT		0x06000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT		0x07000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT	0x08000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT	0x09000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_RESET			0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG2					0x00000478
+#define VIVS_MC_PROFILE_CONFIG2_MC__MASK			0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT			0
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE	0x00000001
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP	0x00000002
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE	0x00000003
+#define VIVS_MC_PROFILE_CONFIG2_MC_RESET			0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_HI__MASK			0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT			8
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED	0x00000000
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED	0x00000100
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED	0x00000200
+#define VIVS_MC_PROFILE_CONFIG2_HI_RESET			0x00000f00
+
+#define VIVS_MC_PROFILE_CONFIG3					0x0000047c
+
+#define VIVS_MC_BUS_CONFIG					0x00000480
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK			0x0000000f
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT			0
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x)			(((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK)
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK			0x000000f0
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT			4
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x)			(((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK)
+
+#define VIVS_MC_START_COMPOSITION				0x00000554
+
+#define VIVS_MC_128B_MERGE					0x00000558
+
+
+#endif /* STATE_HI_XML */
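
[Editor's note: the __MASK/__SHIFT/NAME(x) macro triples above are meant to compose when packing register values: the NAME(x) form shifts the argument into position and masks off anything that would spill into adjacent fields. A minimal sketch, again assuming <linux/io.h> and a mapped register base; the helper and the exact bit pairing are illustrative, not part of this patch:

	/* Load a new core clock frequency scale factor. */
	static void example_set_fscale(void __iomem *mmio, u32 fscale)
	{
		u32 control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale) |
			      VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD;

		writel(control, mmio + VIVS_HI_CLOCK_CONTROL);
	}
]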
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 793e497..b79c316 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -953,7 +953,7 @@
 	drm_connector_cleanup(connector);
 }
 
-static struct drm_connector_funcs exynos_dp_connector_funcs = {
+static const struct drm_connector_funcs exynos_dp_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = exynos_dp_detect,
@@ -998,7 +998,7 @@
 	return &dp->encoder;
 }
 
-static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
 	.get_modes = exynos_dp_get_modes,
 	.best_encoder = exynos_dp_best_encoder,
 };
@@ -1176,14 +1176,14 @@
 {
 }
 
-static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
 	.mode_fixup = exynos_dp_mode_fixup,
 	.mode_set = exynos_dp_mode_set,
 	.enable = exynos_dp_enable,
 	.disable = exynos_dp_disable,
 };
 
-static struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9d30a0f..80f7974 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -93,7 +93,7 @@
 	}
 }
 
-static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
 	.enable		= exynos_drm_crtc_enable,
 	.disable	= exynos_drm_crtc_disable,
 	.mode_set_nofb	= exynos_drm_crtc_mode_set_nofb,
@@ -113,7 +113,7 @@
 	kfree(exynos_crtc);
 }
 
-static struct drm_crtc_funcs exynos_crtc_funcs = {
+static const struct drm_crtc_funcs exynos_crtc_funcs = {
 	.set_config	= drm_atomic_helper_set_config,
 	.page_flip	= drm_atomic_helper_page_flip,
 	.destroy	= exynos_drm_crtc_destroy,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 1dbf8dc..05350ae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -57,7 +57,7 @@
 	drm_connector_cleanup(connector);
 }
 
-static struct drm_connector_funcs exynos_dpi_connector_funcs = {
+static const struct drm_connector_funcs exynos_dpi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = exynos_dpi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -100,7 +100,7 @@
 	return &ctx->encoder;
 }
 
-static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
 	.get_modes = exynos_dpi_get_modes,
 	.best_encoder = exynos_dpi_best_encoder,
 };
@@ -161,14 +161,14 @@
 	}
 }
 
-static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
 	.mode_fixup = exynos_dpi_mode_fixup,
 	.mode_set = exynos_dpi_mode_set,
 	.enable = exynos_dpi_enable,
 	.disable = exynos_dpi_disable,
 };
 
-static struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index bc09bba..d84a498 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1541,7 +1541,7 @@
 	connector->dev = NULL;
 }
 
-static struct drm_connector_funcs exynos_dsi_connector_funcs = {
+static const struct drm_connector_funcs exynos_dsi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = exynos_dsi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1569,7 +1569,7 @@
 	return &dsi->encoder;
 }
 
-static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
 	.get_modes = exynos_dsi_get_modes,
 	.best_encoder = exynos_dsi_best_encoder,
 };
@@ -1622,14 +1622,14 @@
 	vm->hsync_len = m->hsync_end - m->hsync_start;
 }
 
-static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
 	.mode_fixup = exynos_dsi_mode_fixup,
 	.mode_set = exynos_dsi_mode_set,
 	.enable = exynos_dsi_enable,
 	.disable = exynos_dsi_disable,
 };
 
-static struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index f6bdb0d..cbbb1a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -110,7 +110,7 @@
 	return 0;
 }
 
-static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
+static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
 	.destroy	= exynos_drm_fb_destroy,
 	.create_handle	= exynos_drm_fb_create_handle,
 	.dirty		= exynos_drm_fb_dirty,
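
[Editor's note: the exynos hunks in this series all apply one pattern: function-pointer tables that the DRM core only ever reads are marked static const, so they are emitted into .rodata and a stray (or malicious) write can no longer retarget the callbacks. The pattern in isolation, with made-up names:

	struct example_ops {
		void (*destroy)(void *obj);
	};

	static void example_destroy(void *obj) { /* ... */ }

	/* const places the table in .rodata; the pointers are fixed for good. */
	static const struct example_ops example_funcs = {
		.destroy = example_destroy,
	};
]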
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 8994eab..4eaef36 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -389,7 +389,7 @@
 	mutex_unlock(&mic_mutex);
 }
 
-struct drm_bridge_funcs mic_bridge_funcs = {
+static const struct drm_bridge_funcs mic_bridge_funcs = {
 	.disable = mic_disable,
 	.post_disable = mic_post_disable,
 	.pre_enable = mic_pre_enable,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 319aa31..0be29c1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -339,7 +339,7 @@
 {
 }
 
-static struct drm_connector_funcs vidi_connector_funcs = {
+static const struct drm_connector_funcs vidi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = vidi_detect,
@@ -383,7 +383,7 @@
 	return &ctx->encoder;
 }
 
-static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
 	.get_modes = vidi_get_modes,
 	.best_encoder = vidi_best_encoder,
 };
@@ -431,14 +431,14 @@
 {
 }
 
-static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
 	.mode_fixup = exynos_vidi_mode_fixup,
 	.mode_set = exynos_vidi_mode_set,
 	.enable = exynos_vidi_enable,
 	.disable = exynos_vidi_disable,
 };
 
-static struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 7d5ca6c..21a29db 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -956,7 +956,7 @@
 	drm_connector_cleanup(connector);
 }
 
-static struct drm_connector_funcs hdmi_connector_funcs = {
+static const struct drm_connector_funcs hdmi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = hdmi_detect,
@@ -1030,7 +1030,7 @@
 	return &hdata->encoder;
 }
 
-static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
 	.get_modes = hdmi_get_modes,
 	.mode_valid = hdmi_mode_valid,
 	.best_encoder = hdmi_best_encoder,
@@ -1641,14 +1641,14 @@
 	hdata->powered = false;
 }
 
-static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
 	.mode_fixup	= hdmi_mode_fixup,
 	.mode_set	= hdmi_mode_set,
 	.enable		= hdmi_enable,
 	.disable	= hdmi_disable,
 };
 
-static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 1930234..fca97d3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -363,7 +363,6 @@
 	fsl_dev->np = dev->of_node;
 	drm->dev_private = fsl_dev;
 	dev_set_drvdata(dev, fsl_dev);
-	drm_dev_set_unique(drm, dev_name(dev));
 
 	ret = drm_dev_register(drm, 0);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index 1a1acd3..7cd87a0 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -821,14 +821,18 @@
 	struct drm_device *dev = dsi_config->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
-
 	u32 pipeconf_reg = PIPEACONF;
 	u32 dspcntr_reg = DSPACNTR;
+	u32 pipeconf, dspcntr;
 
-	u32 pipeconf = dev_priv->pipeconf[pipe];
-	u32 dspcntr = dev_priv->dspcntr[pipe];
 	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
 
+	if (WARN_ON(pipe < 0))
+		return;
+
+	pipeconf = dev_priv->pipeconf[pipe];
+	dspcntr = dev_priv->dspcntr[pipe];
+
 	if (pipe) {
 		pipeconf_reg = PIPECCONF;
 		dspcntr_reg = DSPCCNTR;
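
[Editor's note: the mdfld hunk above fixes an ordering bug — mdfld_dsi_encoder_get_pipe() can return a negative error code, and the old initializers indexed dev_priv->pipeconf[pipe] before any check. The rewrite leaves the locals uninitialized, bails out under WARN_ON(pipe < 0), and only then reads the arrays. The same guard-before-index pattern in a standalone sketch:

	#include <errno.h>

	int example_lookup(const int *table, int len, int idx)
	{
		/* Validate the index before touching the array, never after. */
		if (idx < 0 || idx >= len)
			return -EINVAL;

		return table[idx];
	}
]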
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f2..533d1e3 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -752,7 +752,7 @@
 	adv7511->f_tmds = mode->clock;
 }
 
-static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
+static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
 	.dpms = adv7511_encoder_dpms,
 	.mode_valid = adv7511_encoder_mode_valid,
 	.mode_set = adv7511_encoder_mode_set,
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index d9a72c9..90db5f4 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -371,7 +371,7 @@
 	return 0;
 }
 
-static struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
+static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
 	.set_config = ch7006_encoder_set_config,
 	.destroy = ch7006_encoder_destroy,
 	.dpms = ch7006_encoder_dpms,
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 002ce78..c400428 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -341,7 +341,7 @@
 	drm_i2c_encoder_destroy(encoder);
 }
 
-static struct drm_encoder_slave_funcs sil164_encoder_funcs = {
+static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
 	.set_config = sil164_encoder_set_config,
 	.destroy = sil164_encoder_destroy,
 	.dpms = sil164_encoder_dpms,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 411a9c6..a8721fc 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1639,7 +1639,7 @@
 	intel_runtime_pm_get(dev_priv);
 	mutex_lock(&dev_priv->fbc.lock);
 
-	if (intel_fbc_enabled(dev_priv))
+	if (intel_fbc_is_active(dev_priv))
 		seq_puts(m, "FBC enabled\n");
 	else
 		seq_printf(m, "FBC disabled: %s\n",
@@ -1869,33 +1869,29 @@
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct intel_fbdev *ifbdev = NULL;
-	struct intel_framebuffer *fb;
+	struct intel_framebuffer *fbdev_fb = NULL;
 	struct drm_framebuffer *drm_fb;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (to_i915(dev)->fbdev) {
+		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
 
-	ifbdev = dev_priv->fbdev;
-	if (ifbdev) {
-		fb = to_intel_framebuffer(ifbdev->helper.fb);
-
-		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
-			   fb->base.width,
-			   fb->base.height,
-			   fb->base.depth,
-			   fb->base.bits_per_pixel,
-			   fb->base.modifier[0],
-			   atomic_read(&fb->base.refcount.refcount));
-		describe_obj(m, fb->obj);
-		seq_putc(m, '\n');
-	}
+		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+			   fbdev_fb->base.width,
+			   fbdev_fb->base.height,
+			   fbdev_fb->base.depth,
+			   fbdev_fb->base.bits_per_pixel,
+			   fbdev_fb->base.modifier[0],
+			   atomic_read(&fbdev_fb->base.refcount.refcount));
+		describe_obj(m, fbdev_fb->obj);
+		seq_putc(m, '\n');
+	}
 #endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	drm_for_each_fb(drm_fb, dev) {
-		fb = to_intel_framebuffer(drm_fb);
-		if (ifbdev && &fb->base == ifbdev->helper.fb)
+		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+		if (fb == fbdev_fb)
 			continue;
 
 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
@@ -2473,15 +2469,15 @@
 	if (!HAS_GUC_SCHED(dev_priv->dev))
 		return 0;
 
+	if (mutex_lock_interruptible(&dev->struct_mutex))
+		return 0;
+
 	/* Take a local copy of the GuC data, so we can dump it at leisure */
-	spin_lock(&dev_priv->guc.host2guc_lock);
 	guc = dev_priv->guc;
-	if (guc.execbuf_client) {
-		spin_lock(&guc.execbuf_client->wq_lock);
+	if (guc.execbuf_client)
 		client = *guc.execbuf_client;
-		spin_unlock(&guc.execbuf_client->wq_lock);
-	}
-	spin_unlock(&dev_priv->guc.host2guc_lock);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
 	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
@@ -2582,8 +2578,11 @@
 		}
 	seq_puts(m, "\n");
 
-	/* CHV PSR has no kind of performance counter */
-	if (HAS_DDI(dev)) {
+	/*
+	 * VLV/CHV PSR has no kind of performance counter
+	 * SKL+ Perf counter is reset to 0 every time DC state is entered
+	 */
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
 			EDP_PSR_PERF_CNT_MASK;
 
@@ -2685,71 +2684,6 @@
 	return 0;
 }
 
-static const char *power_domain_str(enum intel_display_power_domain domain)
-{
-	switch (domain) {
-	case POWER_DOMAIN_PIPE_A:
-		return "PIPE_A";
-	case POWER_DOMAIN_PIPE_B:
-		return "PIPE_B";
-	case POWER_DOMAIN_PIPE_C:
-		return "PIPE_C";
-	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-		return "PIPE_A_PANEL_FITTER";
-	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-		return "PIPE_B_PANEL_FITTER";
-	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-		return "PIPE_C_PANEL_FITTER";
-	case POWER_DOMAIN_TRANSCODER_A:
-		return "TRANSCODER_A";
-	case POWER_DOMAIN_TRANSCODER_B:
-		return "TRANSCODER_B";
-	case POWER_DOMAIN_TRANSCODER_C:
-		return "TRANSCODER_C";
-	case POWER_DOMAIN_TRANSCODER_EDP:
-		return "TRANSCODER_EDP";
-	case POWER_DOMAIN_PORT_DDI_A_LANES:
-		return "PORT_DDI_A_LANES";
-	case POWER_DOMAIN_PORT_DDI_B_LANES:
-		return "PORT_DDI_B_LANES";
-	case POWER_DOMAIN_PORT_DDI_C_LANES:
-		return "PORT_DDI_C_LANES";
-	case POWER_DOMAIN_PORT_DDI_D_LANES:
-		return "PORT_DDI_D_LANES";
-	case POWER_DOMAIN_PORT_DDI_E_LANES:
-		return "PORT_DDI_E_LANES";
-	case POWER_DOMAIN_PORT_DSI:
-		return "PORT_DSI";
-	case POWER_DOMAIN_PORT_CRT:
-		return "PORT_CRT";
-	case POWER_DOMAIN_PORT_OTHER:
-		return "PORT_OTHER";
-	case POWER_DOMAIN_VGA:
-		return "VGA";
-	case POWER_DOMAIN_AUDIO:
-		return "AUDIO";
-	case POWER_DOMAIN_PLLS:
-		return "PLLS";
-	case POWER_DOMAIN_AUX_A:
-		return "AUX_A";
-	case POWER_DOMAIN_AUX_B:
-		return "AUX_B";
-	case POWER_DOMAIN_AUX_C:
-		return "AUX_C";
-	case POWER_DOMAIN_AUX_D:
-		return "AUX_D";
-	case POWER_DOMAIN_GMBUS:
-		return "GMBUS";
-	case POWER_DOMAIN_MODESET:
-		return "MODESET";
-	case POWER_DOMAIN_INIT:
-		return "INIT";
-	default:
-		MISSING_CASE(domain);
-		return "?";
-	}
-}
-
 static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -2775,7 +2709,7 @@
 				continue;
 
 			seq_printf(m, "  %-23s %d\n",
-				 power_domain_str(power_domain),
+				 intel_display_power_domain_str(power_domain),
 				 power_domains->domain_use_count[power_domain]);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6344dfb..e6935f1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -228,121 +228,83 @@
 	.need_gfx_hws = 1, .has_hotplug = 1, \
 	.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
-	.has_llc = 1
+	.has_llc = 1, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	IVB_CURSOR_OFFSETS
 
 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.num_pipes = 0, /* legal, last one wins */
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
+#define VLV_FEATURES  \
+	.gen = 7, .num_pipes = 2, \
+	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.display_mmio_offset = VLV_DISPLAY_BASE, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_valleyview_m_info = {
-	GEN7_FEATURES,
-	.is_mobile = 1,
-	.num_pipes = 2,
+	VLV_FEATURES,
 	.is_valleyview = 1,
-	.display_mmio_offset = VLV_DISPLAY_BASE,
-	.has_fbc = 0, /* legal, last one wins */
-	.has_llc = 0, /* legal, last one wins */
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.is_mobile = 1,
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
-	GEN7_FEATURES,
-	.num_pipes = 2,
+	VLV_FEATURES,
 	.is_valleyview = 1,
-	.display_mmio_offset = VLV_DISPLAY_BASE,
-	.has_fbc = 0, /* legal, last one wins */
-	.has_llc = 0, /* legal, last one wins */
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
+#define HSW_FEATURES  \
+	GEN7_FEATURES, \
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+	.has_ddi = 1, \
+	.has_fpga_dbg = 1
+
 static const struct intel_device_info intel_haswell_d_info = {
-	GEN7_FEATURES,
+	HSW_FEATURES,
 	.is_haswell = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
-	GEN7_FEATURES,
+	HSW_FEATURES,
 	.is_haswell = 1,
 	.is_mobile = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_d_info = {
-	.gen = 8, .num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-	.has_llc = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.has_fbc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
+	HSW_FEATURES,
+	.gen = 8,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
-	.gen = 8, .is_mobile = 1, .num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-	.has_llc = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.has_fbc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
+	HSW_FEATURES,
+	.gen = 8, .is_mobile = 1,
 };
 
 static const struct intel_device_info intel_broadwell_gt3d_info = {
-	.gen = 8, .num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	HSW_FEATURES,
+	.gen = 8,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-	.has_llc = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.has_fbc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_gt3m_info = {
-	.gen = 8, .is_mobile = 1, .num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	HSW_FEATURES,
+	.gen = 8, .is_mobile = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-	.has_llc = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.has_fbc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_cherryview_info = {
@@ -356,29 +318,16 @@
 };
 
 static const struct intel_device_info intel_skylake_info = {
+	HSW_FEATURES,
 	.is_skylake = 1,
-	.gen = 9, .num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-	.has_llc = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.has_fbc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
+	.gen = 9,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
+	HSW_FEATURES,
 	.is_skylake = 1,
-	.gen = 9, .num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	.gen = 9,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-	.has_llc = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.has_fbc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_broxton_info = {
@@ -396,33 +345,18 @@
 };
 
 static const struct intel_device_info intel_kabylake_info = {
+	HSW_FEATURES,
 	.is_preliminary = 1,
 	.is_kabylake = 1,
 	.gen = 9,
-	.num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-	.has_llc = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.has_fbc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
+	HSW_FEATURES,
 	.is_preliminary = 1,
 	.is_kabylake = 1,
 	.gen = 9,
-	.num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-	.has_llc = 1,
-	.has_ddi = 1,
-	.has_fpga_dbg = 1,
-	.has_fbc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	IVB_CURSOR_OFFSETS,
 };
 
 /*
@@ -465,6 +399,7 @@
 	INTEL_SKL_GT1_IDS(&intel_skylake_info),
 	INTEL_SKL_GT2_IDS(&intel_skylake_info),
 	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
 	INTEL_BXT_IDS(&intel_broxton_info),
 	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
 	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
@@ -565,7 +500,8 @@
 				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev) &&
 					!IS_KABYLAKE(dev));
-			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
+			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+				   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
 				dev_priv->pch_type = intel_virt_detect_pch(dev);
 			} else
 				continue;
@@ -624,6 +560,14 @@
 			      bool rpm_resume);
 static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
 
+static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+{
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+	if (acpi_target_system_state() < ACPI_STATE_S3)
+		return true;
+#endif
+	return false;
+}
 
 static int i915_drm_suspend(struct drm_device *dev)
 {
@@ -676,11 +620,7 @@
 
 	i915_save_state(dev);
 
-	opregion_target_state = PCI_D3cold;
-#if IS_ENABLED(CONFIG_ACPI_SLEEP)
-	if (acpi_target_system_state() < ACPI_STATE_S3)
-		opregion_target_state = PCI_D1;
-#endif
+	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
 	intel_opregion_notify_adapter(dev, opregion_target_state);
 
 	intel_uncore_forcewake_reset(dev, false);
@@ -701,15 +641,26 @@
 static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 {
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	bool fw_csr;
 	int ret;
 
-	intel_power_domains_suspend(dev_priv);
+	fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+	/*
+	 * In case of firmware assisted context save/restore don't manually
+	 * deinit the power domains. This also means the CSR/DMC firmware will
+	 * stay active, it will power down any HW resources as required and
+	 * also enable deeper system power states that would be blocked if the
+	 * firmware was inactive.
+	 */
+	if (!fw_csr)
+		intel_power_domains_suspend(dev_priv);
 
 	ret = intel_suspend_complete(dev_priv);
 
 	if (ret) {
 		DRM_ERROR("Suspend complete failed: %d\n", ret);
-		intel_power_domains_init_hw(dev_priv, true);
+		if (!fw_csr)
+			intel_power_domains_init_hw(dev_priv, true);
 
 		return ret;
 	}
@@ -730,6 +681,8 @@
 	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
 		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
+	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
+
 	return 0;
 }
 
@@ -842,8 +795,10 @@
 	 * FIXME: This should be solved with a special hdmi sink device or
 	 * similar so that power domains can be employed.
 	 */
-	if (pci_enable_device(dev->pdev))
-		return -EIO;
+	if (pci_enable_device(dev->pdev)) {
+		ret = -EIO;
+		goto out;
+	}
 
 	pci_set_master(dev->pdev);
 
@@ -861,7 +816,12 @@
 		hsw_disable_pc8(dev_priv);
 
 	intel_uncore_sanitize(dev);
-	intel_power_domains_init_hw(dev_priv, true);
+
+	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+		intel_power_domains_init_hw(dev_priv, true);
+
+out:
+	dev_priv->suspended_to_idle = false;
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 15c6dc0..f1a8a53 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -57,7 +57,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20151120"
+#define DRIVER_DATE		"20151204"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -902,7 +902,6 @@
 	/* This is always the inner lock when overlapping with struct_mutex and
 	 * it's the outer lock when overlapping with stolen_lock. */
 	struct mutex lock;
-	unsigned long uncompressed_size;
 	unsigned threshold;
 	unsigned int fb_id;
 	unsigned int possible_framebuffer_bits;
@@ -915,21 +914,21 @@
 
 	bool false_color;
 
-	/* Tracks whether the HW is actually enabled, not whether the feature is
-	 * possible. */
 	bool enabled;
+	bool active;
 
 	struct intel_fbc_work {
-		struct delayed_work work;
-		struct intel_crtc *crtc;
+		bool scheduled;
+		struct work_struct work;
 		struct drm_framebuffer *fb;
-	} *fbc_work;
+		unsigned long enable_jiffies;
+	} work;
 
 	const char *no_fbc_reason;
 
-	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
-	void (*enable_fbc)(struct intel_crtc *crtc);
-	void (*disable_fbc)(struct drm_i915_private *dev_priv);
+	bool (*is_active)(struct drm_i915_private *dev_priv);
+	void (*activate)(struct intel_crtc *crtc);
+	void (*deactivate)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -1885,6 +1884,7 @@
 	u32 chv_phy_control;
 
 	u32 suspend_count;
+	bool suspended_to_idle;
 	struct i915_suspend_saved_registers regfile;
 	struct vlv_s0ix_state vlv_s0ix_state;
 
@@ -2608,11 +2608,13 @@
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
@@ -2749,17 +2751,47 @@
 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
 				   uint32_t mask,
 				   uint32_t bits);
-void
-ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
-void
-ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+			    uint32_t interrupt_mask,
+			    uint32_t enabled_irq_mask);
+static inline void
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+	ilk_update_display_irq(dev_priv, bits, bits);
+}
+static inline void
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+	ilk_update_display_irq(dev_priv, bits, 0);
+}
+void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+			 enum pipe pipe,
+			 uint32_t interrupt_mask,
+			 uint32_t enabled_irq_mask);
+static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
+				       enum pipe pipe, uint32_t bits)
+{
+	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
+}
+static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
+					enum pipe pipe, uint32_t bits)
+{
+	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
+}
 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 				  uint32_t interrupt_mask,
 				  uint32_t enabled_irq_mask);
-#define ibx_enable_display_interrupt(dev_priv, bits) \
-	ibx_display_interrupt_update((dev_priv), (bits), (bits))
-#define ibx_disable_display_interrupt(dev_priv, bits) \
-	ibx_display_interrupt_update((dev_priv), (bits), 0)
+static inline void
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+	ibx_display_interrupt_update(dev_priv, bits, bits);
+}
+static inline void
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+	ibx_display_interrupt_update(dev_priv, bits, 0);
+}
+
 
 /* i915_gem.c */
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
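The macro-to-inline conversions above (ibx_enable_display_interrupt and friends) keep the generated code identical while giving the compiler real argument types to check. A minimal sketch of the difference, with hypothetical names standing in for the driver's:

struct foo_private;
void foo_update_irq(struct foo_private *priv, uint32_t mask, uint32_t enabled);

/* Macro version: pure text substitution, any arguments compile. */
#define foo_enable_irq_m(priv, bits) foo_update_irq((priv), (bits), (bits))

/* Inline version: priv must really be a struct foo_private *. */
static inline void foo_enable_irq(struct foo_private *priv, uint32_t bits)
{
	foo_update_irq(priv, bits, bits);
}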
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f987389..b7d7cec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3077,7 +3077,7 @@
 		if (ret == 0)
 			ret = __i915_wait_request(req[i], reset_counter, true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-						  file->driver_priv);
+						  to_rps_client(file));
 		i915_gem_request_unreference__unlocked(req[i]);
 	}
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4b94004..43761c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -141,8 +141,6 @@
 	if (!ppgtt)
 		return;
 
-	WARN_ON(!list_empty(&ppgtt->base.active_list));
-
 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
 				 mm_list) {
 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index ed9f100..0d23785b 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -86,7 +86,6 @@
 		return -EINVAL;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-	spin_lock(&dev_priv->guc.host2guc_lock);
 
 	dev_priv->guc.action_count += 1;
 	dev_priv->guc.action_cmd = data[0];
@@ -119,7 +118,6 @@
 	}
 	dev_priv->guc.action_status = status;
 
-	spin_unlock(&dev_priv->guc.host2guc_lock);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
 	return ret;
@@ -292,16 +290,12 @@
 	const uint32_t cacheline_size = cache_line_size();
 	uint32_t offset;
 
-	spin_lock(&guc->host2guc_lock);
-
 	/* Doorbell uses a single cache line within a page */
 	offset = offset_in_page(guc->db_cacheline);
 
 	/* Moving to next cache line to reduce contention */
 	guc->db_cacheline += cacheline_size;
 
-	spin_unlock(&guc->host2guc_lock);
-
 	DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
 			offset, guc->db_cacheline, cacheline_size);
 
@@ -322,13 +316,11 @@
 	const uint16_t end = start + half;
 	uint16_t id;
 
-	spin_lock(&guc->host2guc_lock);
 	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
 	if (id == end)
 		id = GUC_INVALID_DOORBELL_ID;
 	else
 		bitmap_set(guc->doorbell_bitmap, id, 1);
-	spin_unlock(&guc->host2guc_lock);
 
 	DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
 			hi_pri ? "high" : "normal", id);
@@ -338,9 +330,7 @@
 
 static void release_doorbell(struct intel_guc *guc, uint16_t id)
 {
-	spin_lock(&guc->host2guc_lock);
 	bitmap_clear(guc->doorbell_bitmap, id, 1);
-	spin_unlock(&guc->host2guc_lock);
 }
 
 /*
@@ -487,16 +477,13 @@
 	struct guc_process_desc *desc;
 	void *base;
 	u32 size = sizeof(struct guc_wq_item);
-	int ret = 0, timeout_counter = 200;
+	int ret = -ETIMEDOUT, timeout_counter = 200;
 
 	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
 	desc = base + gc->proc_desc_offset;
 
 	while (timeout_counter-- > 0) {
-		ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
-				gc->wq_size) >= size, 1);
-
-		if (!ret) {
+		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
 			*offset = gc->wq_tail;
 
 			/* advance the tail for next workqueue item */
@@ -505,7 +492,11 @@
 
 			/* this will break the loop */
 			timeout_counter = 0;
+			ret = 0;
 		}
+
+		if (timeout_counter)
+			usleep_range(1000, 2000);
 	};
 
 	kunmap_atomic(base);
@@ -597,15 +588,12 @@
 {
 	struct intel_guc *guc = client->guc;
 	enum intel_ring_id ring_id = rq->ring->id;
-	unsigned long flags;
 	int q_ret, b_ret;
 
 	/* Need this because of the deferred pin ctx and ring */
 	/* Shall we move this right after ring is pinned? */
 	lr_context_update(rq);
 
-	spin_lock_irqsave(&client->wq_lock, flags);
-
 	q_ret = guc_add_workqueue_item(client, rq);
 	if (q_ret == 0)
 		b_ret = guc_ring_doorbell(client);
@@ -620,12 +608,8 @@
 	} else {
 		client->retcode = 0;
 	}
-	spin_unlock_irqrestore(&client->wq_lock, flags);
-
-	spin_lock(&guc->host2guc_lock);
 	guc->submissions[ring_id] += 1;
 	guc->last_seqno[ring_id] = rq->seqno;
-	spin_unlock(&guc->host2guc_lock);
 
 	return q_ret;
 }
@@ -677,7 +661,7 @@
 /**
  * gem_release_guc_obj() - Release gem object allocated for GuC usage
  * @obj:	gem obj to be released
-  */
+ */
 static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
 {
 	if (!obj)
@@ -768,7 +752,6 @@
 	client->client_obj = obj;
 	client->wq_offset = GUC_DB_SIZE;
 	client->wq_size = GUC_WQ_SIZE;
-	spin_lock_init(&client->wq_lock);
 
 	client->doorbell_offset = select_doorbell_cacheline(guc);
 
@@ -871,8 +854,6 @@
 	if (!guc->ctx_pool_obj)
 		return -ENOMEM;
 
-	spin_lock_init(&dev_priv->guc.host2guc_lock);
-
 	ida_init(&guc->ctx_ids);
 
 	guc_create_log(guc);
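Besides dropping host2guc_lock, the workqueue-space hunk above inverts the error convention: ret starts at -ETIMEDOUT and is cleared only once CIRC_SPACE() reports room, with usleep_range() backing off between polls instead of wait_for_atomic(). The shape of that bounded poll, as a standalone sketch (the ring struct is illustrative, not the GuC's):

#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_ring { u32 head, tail, size; };

static int demo_poll_for_space(struct demo_ring *r, u32 need)
{
	int tries = 200;

	while (tries--) {
		if (CIRC_SPACE(r->tail, r->head, r->size) >= need)
			return 0;		/* space appeared */
		usleep_range(1000, 2000);	/* ~1-2 ms between polls */
	}
	return -ETIMEDOUT;			/* default outcome */
}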
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c8ba949..e88d692 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -215,9 +215,9 @@
  * @interrupt_mask: mask of interrupt bits to update
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
-static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
-				   uint32_t interrupt_mask,
-				   uint32_t enabled_irq_mask)
+void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+			    uint32_t interrupt_mask,
+			    uint32_t enabled_irq_mask)
 {
 	uint32_t new_val;
 
@@ -239,18 +239,6 @@
 	}
 }
 
-void
-ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
-	ilk_update_display_irq(dev_priv, mask, mask);
-}
-
-void
-ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
-	ilk_update_display_irq(dev_priv, mask, 0);
-}
-
 /**
  * ilk_update_gt_irq - update GTIMR
  * @dev_priv: driver private
@@ -300,11 +288,11 @@
 }
 
 /**
-  * snb_update_pm_irq - update GEN6_PMIMR
-  * @dev_priv: driver private
-  * @interrupt_mask: mask of interrupt bits to update
-  * @enabled_irq_mask: mask of interrupt bits to enable
-  */
+ * snb_update_pm_irq - update GEN6_PMIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 			      uint32_t interrupt_mask,
 			      uint32_t enabled_irq_mask)
@@ -418,11 +406,11 @@
 }
 
 /**
-  * bdw_update_port_irq - update DE port interrupt
-  * @dev_priv: driver private
-  * @interrupt_mask: mask of interrupt bits to update
-  * @enabled_irq_mask: mask of interrupt bits to enable
-  */
+ * bdw_update_port_irq - update DE port interrupt
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
 				uint32_t interrupt_mask,
 				uint32_t enabled_irq_mask)
@@ -450,6 +438,38 @@
 }
 
 /**
+ * bdw_update_pipe_irq - update DE pipe interrupt
+ * @dev_priv: driver private
+ * @pipe: pipe whose interrupt to update
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+			 enum pipe pipe,
+			 uint32_t interrupt_mask,
+			 uint32_t enabled_irq_mask)
+{
+	uint32_t new_val;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
+	new_val = dev_priv->de_irq_mask[pipe];
+	new_val &= ~interrupt_mask;
+	new_val |= (~enabled_irq_mask & interrupt_mask);
+
+	if (new_val != dev_priv->de_irq_mask[pipe]) {
+		dev_priv->de_irq_mask[pipe] = new_val;
+		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	}
+}
+
+/**
  * ibx_display_interrupt_update - update SDEIMR
  * @dev_priv: driver private
  * @interrupt_mask: mask of interrupt bits to update
@@ -1824,8 +1844,24 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
+	/*
+	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
+	 * unless we touch the hotplug register, even if hotplug_trigger is
+	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
+	 * errors.
+	 */
 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+	if (!hotplug_trigger) {
+		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
+			PORTD_HOTPLUG_STATUS_MASK |
+			PORTC_HOTPLUG_STATUS_MASK |
+			PORTB_HOTPLUG_STATUS_MASK;
+		dig_hotplug_reg &= ~mask;
+	}
+
 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	if (!hotplug_trigger)
+		return;
 
 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 			   dig_hotplug_reg, hpd,
@@ -1840,8 +1876,7 @@
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
-	if (hotplug_trigger)
-		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1934,8 +1969,7 @@
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
-	if (hotplug_trigger)
-		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2351,13 +2385,9 @@
 				spt_irq_handler(dev, pch_iir);
 			else
 				cpt_irq_handler(dev, pch_iir);
-		} else {
-			/*
-			 * Like on previous PCH there seems to be something
-			 * fishy going on with forwarding PCH interrupts.
-			 */
-			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
-		}
+		} else {
+			DRM_ERROR("The master control interrupt lied (SDE)!\n");
+		}
+
 	}
 
 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2645,7 +2675,7 @@
 						     DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	ironlake_enable_display_irq(dev_priv, bit);
+	ilk_enable_display_irq(dev_priv, bit);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	return 0;
@@ -2670,10 +2700,9 @@
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
-	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
-	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
 	return 0;
 }
 
@@ -2700,7 +2729,7 @@
 						     DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	ironlake_disable_display_irq(dev_priv, bit);
+	ilk_disable_display_irq(dev_priv, bit);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
@@ -2721,9 +2750,7 @@
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
-	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
-	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
@@ -3452,7 +3479,7 @@
 		 * setup is guaranteed to run in single-threaded context. But we
 		 * need it to make the assert_spin_locked happy. */
 		spin_lock_irq(&dev_priv->irq_lock);
-		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
 		spin_unlock_irq(&dev_priv->irq_lock);
 	}
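To make the new bdw_update_pipe_irq() arithmetic concrete, here is a worked example of the new_val computation. Remember the IMR convention: a set bit masks (disables) that interrupt, so enabling means clearing bits.

/*
 *   old IMR          = 0b1111   all four bits masked
 *   interrupt_mask   = 0b0110   bits we are allowed to touch
 *   enabled_irq_mask = 0b0010   of those, the bits to enable
 *
 *   new = (old & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask)
 *       = (0b1111 & 0b1001)      | (0b1101 & 0b0110)
 *       =  0b1001                |  0b0100
 *       =  0b1101   bit 1 is now unmasked (enabled), bit 2 stays masked
 */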
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1a12d44..1dae5ac 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -855,31 +855,31 @@
  *
  * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
  * digital port D (CHV) or port A (BXT).
- */
-/*
- * Dual channel PHY (VLV/CHV/BXT)
- * ---------------------------------
- * |      CH0      |      CH1      |
- * |  CMN/PLL/REF  |  CMN/PLL/REF  |
- * |---------------|---------------| Display PHY
- * | PCS01 | PCS23 | PCS01 | PCS23 |
- * |-------|-------|-------|-------|
- * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
- * ---------------------------------
- * |     DDI0      |     DDI1      | DP/HDMI ports
- * ---------------------------------
  *
- * Single channel PHY (CHV/BXT)
- * -----------------
- * |      CH0      |
- * |  CMN/PLL/REF  |
- * |---------------| Display PHY
- * | PCS01 | PCS23 |
- * |-------|-------|
- * |TX0|TX1|TX2|TX3|
- * -----------------
- * |     DDI2      | DP/HDMI port
- * -----------------
+ *
+ *     Dual channel PHY (VLV/CHV/BXT)
+ *     ---------------------------------
+ *     |      CH0      |      CH1      |
+ *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
+ *     |---------------|---------------| Display PHY
+ *     | PCS01 | PCS23 | PCS01 | PCS23 |
+ *     |-------|-------|-------|-------|
+ *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ *     ---------------------------------
+ *     |     DDI0      |     DDI1      | DP/HDMI ports
+ *     ---------------------------------
+ *
+ *     Single channel PHY (CHV/BXT)
+ *     -----------------
+ *     |      CH0      |
+ *     |  CMN/PLL/REF  |
+ *     |---------------| Display PHY
+ *     | PCS01 | PCS23 |
+ *     |-------|-------|
+ *     |TX0|TX1|TX2|TX3|
+ *     -----------------
+ *     |     DDI2      | DP/HDMI port
+ *     -----------------
  */
 #define DPIO_DEVFN			0
 
@@ -2973,6 +2973,13 @@
 #define OGAMC0			_MMIO(0x30024)
 
 /*
+ * GEN9 clock gating regs
+ */
+#define GEN9_CLKGATE_DIS_0		_MMIO(0x46530)
+#define   PWM2_GATING_DIS		(1 << 14)
+#define   PWM1_GATING_DIS		(1 << 13)
+
+/*
  * Display engine regs
  */
 
@@ -7549,6 +7556,7 @@
 #define SFUSE_STRAP			_MMIO(0xc2014)
 #define  SFUSE_STRAP_FUSE_LOCK		(1<<13)
 #define  SFUSE_STRAP_DISPLAY_DISABLED	(1<<7)
+#define  SFUSE_STRAP_CRT_DISABLED	(1<<6)
 #define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
 #define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
 #define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
@@ -7706,7 +7714,7 @@
 #define BXT_DSI_PLL_RATIO_MAX		0x7D
 #define BXT_DSI_PLL_RATIO_MIN		0x22
 #define BXT_DSI_PLL_RATIO_MASK		0xFF
-#define BXT_REF_CLOCK_KHZ		19500
+#define BXT_REF_CLOCK_KHZ		19200
 
 #define BXT_DSI_PLL_ENABLE		_MMIO(0x46080)
 #define  BXT_DSI_PLL_DO_ENABLE		(1 << 31)
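The BXT_REF_CLOCK_KHZ correction is small but multiplicative: anything that scales a PLL ratio by the reference inherits the error. Assuming, for illustration, that the DSI clock is derived as ratio * refclk / 2 (the exact formula lives in the DSI PLL code, not in this hunk):

/*
 *   ratio = 0x7D (125):
 *     125 * 19200 / 2 = 1200000 kHz   with the corrected reference
 *     125 * 19500 / 2 = 1218750 kHz   with the stale value, ~1.6% high
 */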
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index ce82f9c..070470f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -356,7 +356,10 @@
 	general = find_section(bdb, BDB_GENERAL_FEATURES);
 	if (general) {
 		dev_priv->vbt.int_tv_support = general->int_tv_support;
-		dev_priv->vbt.int_crt_support = general->int_crt_support;
+		/* int_crt_support can't be trusted on earlier platforms */
+		if (bdb->version >= 155 &&
+		    (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
+			dev_priv->vbt.int_crt_support = general->int_crt_support;
 		dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
 		dev_priv->vbt.lvds_ssc_freq =
 			intel_bios_ssc_frequency(dev, general->ssc_freq);
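Gating on bdb->version, as done here, is the usual way to consume a VBT field that is only defined (or only trustworthy) from a given BDB revision on; older blobs just keep the default established before parsing. Stripped of the platform checks, the skeleton is:

	if (bdb->version >= 155)	/* per the patch: field reliable from BDB 155 on */
		dev_priv->vbt.int_crt_support = general->int_crt_support;
	/* else: keep whatever default was set before VBT parsing */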
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 912c0ac..9285fc1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -777,11 +777,37 @@
 	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	i915_reg_t adpa_reg;
+	u32 adpa;
 
 	/* Skip machines without VGA that falsely report hotplug events */
 	if (dmi_check_system(intel_no_crt))
 		return;
 
+	if (HAS_PCH_SPLIT(dev))
+		adpa_reg = PCH_ADPA;
+	else if (IS_VALLEYVIEW(dev))
+		adpa_reg = VLV_ADPA;
+	else
+		adpa_reg = ADPA;
+
+	adpa = I915_READ(adpa_reg);
+	if ((adpa & ADPA_DAC_ENABLE) == 0) {
+		/*
+		 * On some machines (some IVB at least) CRT can be
+		 * fused off, but there's no known fuse bit to
+	 * indicate that. On these machines the ADPA register
+	 * works normally, except the DAC enable bit won't
+	 * take. So the only way to tell is to attempt to enable
+		 * it and see what happens.
+		 */
+		I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
+			   ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+		if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
+			return;
+		I915_WRITE(adpa_reg, adpa);
+	}
+
 	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
 	if (!crt)
 		return;
@@ -815,12 +841,7 @@
 		connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
-	if (HAS_PCH_SPLIT(dev))
-		crt->adpa_reg = PCH_ADPA;
-	else if (IS_VALLEYVIEW(dev))
-		crt->adpa_reg = VLV_ADPA;
-	else
-		crt->adpa_reg = ADPA;
+	crt->adpa_reg = adpa_reg;
 
 	crt->base.compute_config = intel_crt_compute_config;
 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
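The fused-off check added above is a write/readback probe: try to set ADPA_DAC_ENABLE and see whether it sticks (the real code also forces the sync-disable bits while probing). The same pattern in isolation, as a sketch rather than the driver's exact flow:

static bool demo_dac_present(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	u32 saved = I915_READ(reg);

	I915_WRITE(reg, saved | ADPA_DAC_ENABLE);
	if ((I915_READ(reg) & ADPA_DAC_ENABLE) == 0)
		return false;	/* the bit won't take: DAC is fused off */

	I915_WRITE(reg, saved);	/* undo the probe */
	return true;
}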
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 37efcd1..4afb310 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3151,7 +3151,7 @@
 		pipe_config->has_hdmi_sink = true;
 		intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
-		if (intel_hdmi->infoframe_enabled(&encoder->base))
+		if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
 			pipe_config->has_infoframe = true;
 		break;
 	case TRANS_DDI_MODE_SELECT_DVI:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fc0d53a..bda6b9c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -44,6 +44,8 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/dma-buf.h>
 
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
@@ -2130,7 +2132,7 @@
 	 * need the check.
 	 */
 	if (HAS_GMCH_DISPLAY(dev_priv->dev))
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+		if (crtc->config->has_dsi_encoder)
 			assert_dsi_pll_enabled(dev_priv);
 		else
 			assert_pll_enabled(dev_priv, pipe);
@@ -3174,8 +3176,8 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->fbc.disable_fbc)
-		dev_priv->fbc.disable_fbc(dev_priv);
+	if (dev_priv->fbc.deactivate)
+		dev_priv->fbc.deactivate(dev_priv);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -4137,6 +4139,12 @@
 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
+	/*
+	 * Sometimes spurious CPU pipe underruns happen during FDI
+	 * training, at least with VGA+HDMI cloning. Suppress them.
+	 */
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
 	/* For PCH output, training FDI link */
 	dev_priv->display.fdi_link_train(crtc);
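Together with the re-enable added in the next hunk, this brackets FDI link training with underrun suppression. In outline:

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	dev_priv->display.fdi_link_train(crtc);	/* known to trigger spurious underruns */
	/* ... training completes, link enters normal mode ... */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);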
 
@@ -4170,6 +4178,8 @@
 
 	intel_fdi_normal_train(crtc);
 
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
 		const struct drm_display_mode *adjusted_mode =
@@ -4628,7 +4638,7 @@
 		return;
 
 	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
-		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
+		if (intel_crtc->config->has_dsi_encoder)
 			assert_dsi_pll_enabled(dev_priv);
 		else
 			assert_pll_enabled(dev_priv, pipe);
@@ -4784,7 +4794,6 @@
 {
 	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (atomic->wait_vblank)
 		intel_wait_for_vblank(dev, crtc->pipe);
@@ -4798,7 +4807,7 @@
 		intel_update_watermarks(&crtc->base);
 
 	if (atomic->update_fbc)
-		intel_fbc_update(dev_priv);
+		intel_fbc_update(crtc);
 
 	if (atomic->post_enable_primary)
 		intel_post_enable_primary(&crtc->base);
@@ -4813,7 +4822,7 @@
 	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
 
 	if (atomic->disable_fbc)
-		intel_fbc_disable_crtc(crtc);
+		intel_fbc_deactivate(crtc);
 
 	if (crtc->atomic.disable_ips)
 		hsw_disable_ips(crtc);
@@ -4921,6 +4930,8 @@
 	if (intel_crtc->config->has_pch_encoder)
 		intel_wait_for_vblank(dev, pipe);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+
+	intel_fbc_enable(intel_crtc);
 }
 
 /* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4938,7 +4949,6 @@
 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->state);
-	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
 	if (WARN_ON(intel_crtc->active))
 		return;
@@ -4971,10 +4981,12 @@
 
 	intel_crtc->active = true;
 
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	if (intel_crtc->config->has_pch_encoder)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+	else
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		if (encoder->pre_pll_enable)
-			encoder->pre_pll_enable(encoder);
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
 	}
@@ -4982,7 +4994,7 @@
 	if (intel_crtc->config->has_pch_encoder)
 		dev_priv->display.fdi_link_train(crtc);
 
-	if (!is_dsi)
+	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_enable_pipe_clock(intel_crtc);
 
 	if (INTEL_INFO(dev)->gen >= 9)
@@ -4997,7 +5009,7 @@
 	intel_crtc_load_lut(crtc);
 
 	intel_ddi_set_pipe_settings(crtc);
-	if (!is_dsi)
+	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_enable_transcoder_func(crtc);
 
 	intel_update_watermarks(crtc);
@@ -5006,7 +5018,7 @@
 	if (intel_crtc->config->has_pch_encoder)
 		lpt_pch_enable(crtc);
 
-	if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
+	if (intel_crtc->config->dp_encoder_is_mst)
 		intel_ddi_set_vc_payload_alloc(crtc, true);
 
 	assert_vblank_disabled(crtc);
@@ -5017,9 +5029,13 @@
 		intel_opregion_notify_encoder(encoder, true);
 	}
 
-	if (intel_crtc->config->has_pch_encoder)
+	if (intel_crtc->config->has_pch_encoder) {
+		intel_wait_for_vblank(dev, pipe);
+		intel_wait_for_vblank(dev, pipe);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      true);
+	}
 
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */
@@ -5028,6 +5044,8 @@
 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
 	}
+
+	intel_fbc_enable(intel_crtc);
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
@@ -5062,12 +5080,22 @@
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
 
+	/*
+	 * Sometimes spurious CPU pipe underruns happen when the
+	 * pipe is already disabled, but FDI RX/TX is still enabled.
+	 * Happens at least with VGA+HDMI cloning. Suppress them.
+	 */
+	if (intel_crtc->config->has_pch_encoder)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
 	intel_disable_pipe(intel_crtc);
 
 	ironlake_pfit_disable(intel_crtc, false);
 
-	if (intel_crtc->config->has_pch_encoder)
+	if (intel_crtc->config->has_pch_encoder) {
 		ironlake_fdi_disable(crtc);
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+	}
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->post_disable)
@@ -5098,6 +5126,8 @@
 	}
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+
+	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5107,7 +5137,6 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
 	if (intel_crtc->config->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
@@ -5126,7 +5155,7 @@
 	if (intel_crtc->config->dp_encoder_is_mst)
 		intel_ddi_set_vc_payload_alloc(crtc, false);
 
-	if (!is_dsi)
+	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
 	if (INTEL_INFO(dev)->gen >= 9)
@@ -5134,7 +5163,7 @@
 	else
 		ironlake_pfit_disable(intel_crtc, false);
 
-	if (!is_dsi)
+	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_disable_pipe_clock(intel_crtc);
 
 	if (intel_crtc->config->has_pch_encoder) {
@@ -5149,6 +5178,8 @@
 	if (intel_crtc->config->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      true);
+
+	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5214,10 +5245,6 @@
 	}
 }
 
-#define for_each_power_domain(domain, mask)				\
-	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
-		if ((1 << (domain)) & (mask))
-
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
 {
@@ -6140,13 +6167,10 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	bool is_dsi;
 
 	if (WARN_ON(intel_crtc->active))
 		return;
 
-	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
-
 	if (intel_crtc->config->has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
@@ -6169,7 +6193,7 @@
 		if (encoder->pre_pll_enable)
 			encoder->pre_pll_enable(encoder);
 
-	if (!is_dsi) {
+	if (!intel_crtc->config->has_dsi_encoder) {
 		if (IS_CHERRYVIEW(dev)) {
 			chv_prepare_pll(intel_crtc, intel_crtc->config);
 			chv_enable_pll(intel_crtc, intel_crtc->config);
@@ -6248,6 +6272,8 @@
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->enable(encoder);
+
+	intel_fbc_enable(intel_crtc);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6295,7 +6321,7 @@
 		if (encoder->post_disable)
 			encoder->post_disable(encoder);
 
-	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
+	if (!intel_crtc->config->has_dsi_encoder) {
 		if (IS_CHERRYVIEW(dev))
 			chv_disable_pll(dev_priv, pipe);
 		else if (IS_VALLEYVIEW(dev))
@@ -6310,6 +6336,8 @@
 
 	if (!IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@@ -7908,8 +7936,6 @@
 	int refclk, num_connectors = 0;
 	intel_clock_t clock;
 	bool ok;
-	bool is_dsi = false;
-	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	struct drm_atomic_state *state = crtc_state->base.state;
 	struct drm_connector *connector;
@@ -7919,26 +7945,14 @@
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
-
-		switch (encoder->type) {
-		case INTEL_OUTPUT_DSI:
-			is_dsi = true;
-			break;
-		default:
-			break;
-		}
-
-		num_connectors++;
-	}
-
-	if (is_dsi)
+	if (crtc_state->has_dsi_encoder)
 		return 0;
 
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc == &crtc->base)
+			num_connectors++;
+	}
+
 	if (!crtc_state->clock_set) {
 		refclk = i9xx_get_refclk(crtc_state, num_connectors);
 
@@ -8931,7 +8945,7 @@
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
+	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
 
 	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
 	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
@@ -9705,14 +9719,10 @@
 	else
 		cdclk = 337500;
 
-	/*
-	 * FIXME move the cdclk caclulation to
-	 * compute_config() so we can fail gracegully.
-	 */
 	if (cdclk > dev_priv->max_cdclk_freq) {
-		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
-			  cdclk, dev_priv->max_cdclk_freq);
-		cdclk = dev_priv->max_cdclk_freq;
+		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+			      cdclk, dev_priv->max_cdclk_freq);
+		return -EINVAL;
 	}
 
 	to_intel_atomic_state(state)->cdclk = cdclk;
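This flips the failure model for cdclk: rather than clamping at commit time, when the modeset can no longer be backed out, an over-limit value now fails the atomic check and userspace sees -EINVAL up front. Schematically:

	if (cdclk > dev_priv->max_cdclk_freq)
		return -EINVAL;				/* reject at check time */
	to_intel_atomic_state(state)->cdclk = cdclk;	/* otherwise record it */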
@@ -9807,6 +9817,7 @@
 		break;
 	case PORT_CLK_SEL_SPLL:
 		pipe_config->shared_dpll = DPLL_ID_SPLL;
+		break;
 	}
 }
 
@@ -11191,6 +11202,10 @@
 		return true;
 	else if (i915.enable_execlists)
 		return true;
+	else if (obj->base.dma_buf &&
+		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
+						       false))
+		return true;
 	else
 		return ring != i915_gem_request_get_ring(obj->last_write_req);
 }
@@ -11305,6 +11320,9 @@
 {
 	struct intel_mmio_flip *mmio_flip =
 		container_of(work, struct intel_mmio_flip, work);
+	struct intel_framebuffer *intel_fb =
+		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 
 	if (mmio_flip->req) {
 		WARN_ON(__i915_wait_request(mmio_flip->req,
@@ -11314,6 +11332,12 @@
 		i915_gem_request_unreference__unlocked(mmio_flip->req);
 	}
 
+	/* For framebuffer backed by dmabuf, wait for fence */
+	if (obj->base.dma_buf)
+		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+							    false, false,
+							    MAX_SCHEDULE_TIMEOUT) < 0);
+
 	intel_do_mmio_flip(mmio_flip);
 	kfree(mmio_flip);
 }
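For reference, the two bool arguments to the reservation wait above are wait_all and intr, so false/false means: wait only on the exclusive fence, uninterruptibly, with no deadline. Annotated:

	/* reservation_object_wait_timeout_rcu(resv, wait_all, intr, timeout) */
	if (obj->base.dma_buf)
		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
							    false,	/* exclusive fence only */
							    false,	/* not interruptible */
							    MAX_SCHEDULE_TIMEOUT) < 0);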
@@ -11584,7 +11608,7 @@
 			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_fbc_disable_crtc(intel_crtc);
+	intel_fbc_deactivate(intel_crtc);
 	intel_frontbuffer_flip_prepare(dev,
 				       to_intel_plane(primary)->frontbuffer_bit);
 
@@ -12587,6 +12611,8 @@
 	} else
 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
+	PIPE_CONF_CHECK_I(has_dsi_encoder);
+
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
@@ -13383,6 +13409,13 @@
 			dev_priv->display.crtc_disable(crtc);
 			intel_crtc->active = false;
 			intel_disable_shared_dpll(intel_crtc);
+
+			/*
+			 * Underruns don't always raise
+			 * interrupts, so check manually.
+			 */
+			intel_check_cpu_fifo_underruns(dev_priv);
+			intel_check_pch_fifo_underruns(dev_priv);
 		}
 	}
 
@@ -13652,6 +13685,17 @@
 			return ret;
 	}
 
+	/* For framebuffer backed by dmabuf, wait for fence */
+	if (obj && obj->base.dma_buf) {
+		ret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+							  false, true,
+							  MAX_SCHEDULE_TIMEOUT);
+		if (ret == -ERESTARTSYS)
+			return ret;
+
+		WARN_ON(ret < 0);
+	}
+
 	if (!obj) {
 		ret = 0;
 	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -14246,7 +14290,14 @@
 	if (IS_CHERRYVIEW(dev))
 		return false;
 
-	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+		return false;
+
+	/* DDI E can't be used if DDI A requires 4 lanes */
+	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+		return false;
+
+	if (!dev_priv->vbt.int_crt_support)
 		return false;
 
 	return true;
@@ -14789,9 +14840,6 @@
 	else if (IS_I945GM(dev) || IS_845G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i9xx_misc_get_display_clock_speed;
-	else if (IS_PINEVIEW(dev))
-		dev_priv->display.get_display_clock_speed =
-			pnv_get_display_clock_speed;
 	else if (IS_I915GM(dev))
 		dev_priv->display.get_display_clock_speed =
 			i915gm_get_display_clock_speed;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9b10526..0f0573a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -681,7 +681,7 @@
 	 * The clock divider is derived from hrawclk, and the AUX clock should
 	 * run at 2MHz. So take the hrawclk value, divide by 2, and use that
 	 */
-	return index ? 0 : intel_hrawclk(dev) / 2;
+	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
 }
 
 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -694,10 +694,10 @@
 		return 0;
 
 	if (intel_dig_port->port == PORT_A) {
-		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 
 	} else {
-		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
 	}
 }
 
@@ -711,7 +711,7 @@
 		if (index)
 			return 0;
 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
-	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+	} else if (HAS_PCH_LPT_H(dev_priv)) {
 		/* Workaround for non-ULT HSW */
 		switch (index) {
 		case 0: return 63;
@@ -719,7 +719,7 @@
 		default: return 0;
 		}
 	} else  {
-		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
 	}
 }
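The DIV_ROUND_UP to DIV_ROUND_CLOSEST swaps above matter because the divider should land the AUX clock as near 2MHz as possible, and rounding up always errs slow. With made-up numbers:

/*
 *   source = 4100 kHz, target = 2000 kHz
 *   DIV_ROUND_UP(4100, 2000)      = 3  ->  4100 / 3 ~= 1367 kHz  (633 kHz slow)
 *   DIV_ROUND_CLOSEST(4100, 2000) = 2  ->  4100 / 2  = 2050 kHz  ( 50 kHz fast)
 */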
 
@@ -2697,6 +2697,15 @@
 	if (IS_VALLEYVIEW(dev))
 		vlv_init_panel_power_sequencer(intel_dp);
 
+	/*
+	 * We get an occasional spurious underrun between the port
+	 * enable and vdd enable, when enabling port A eDP.
+	 *
+	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
+	 */
+	if (port == PORT_A)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
 	intel_dp_enable_port(intel_dp);
 
 	if (port == PORT_A && IS_GEN5(dev_priv)) {
@@ -2714,6 +2723,9 @@
 	edp_panel_on(intel_dp);
 	edp_panel_vdd_off(intel_dp, true);
 
+	if (port == PORT_A)
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
 	pps_unlock(intel_dp);
 
 	if (IS_VALLEYVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ab5c147..50f83d2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -393,6 +393,9 @@
 	 * accordingly. */
 	bool has_dp_encoder;
 
+	/* DSI has special cases */
+	bool has_dsi_encoder;
+
 	/* Whether we should send NULL infoframes. Required for audio. */
 	bool has_hdmi_sink;
 
@@ -710,7 +713,8 @@
 	void (*set_infoframes)(struct drm_encoder *encoder,
 			       bool enable,
 			       const struct drm_display_mode *adjusted_mode);
-	bool (*infoframe_enabled)(struct drm_encoder *encoder);
+	bool (*infoframe_enabled)(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config);
 };
 
 struct intel_dp_mst_encoder;
@@ -1316,9 +1320,11 @@
 #endif
 
 /* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
-void intel_fbc_update(struct drm_i915_private *dev_priv);
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+void intel_fbc_deactivate(struct intel_crtc *crtc);
+void intel_fbc_update(struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_enable(struct intel_crtc *crtc);
 void intel_fbc_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_disable_crtc(struct intel_crtc *crtc);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1410,6 +1416,8 @@
 void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
 void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
 
 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 				    enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index add2cf5..fff9a66 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -266,16 +266,18 @@
 }
 
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *config)
+				     struct intel_crtc_state *pipe_config)
 {
 	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
 						   base);
 	struct intel_connector *intel_connector = intel_dsi->attached_connector;
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
-	struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
 	DRM_DEBUG_KMS("\n");
 
+	pipe_config->has_dsi_encoder = true;
+
 	if (fixed_mode)
 		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
@@ -462,6 +464,8 @@
 	intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
+
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
@@ -474,6 +478,9 @@
 
 	DRM_DEBUG_KMS("\n");
 
+	intel_dsi_prepare(encoder);
+	intel_enable_dsi_pll(encoder);
+
 	/* Panel Enable over CRC PMIC */
 	if (intel_dsi->gpio_panel)
 		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
@@ -699,6 +706,8 @@
 	u32 pclk = 0;
 	DRM_DEBUG_KMS("\n");
 
+	pipe_config->has_dsi_encoder = true;
+
 	/*
 	 * DPLL_MD is not used in case of DSI, reading will get some default value
 	 * set dpll_md = 0
@@ -1026,15 +1035,6 @@
 	}
 }
 
-static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
-{
-	DRM_DEBUG_KMS("\n");
-
-	intel_dsi_prepare(encoder);
-	intel_enable_dsi_pll(encoder);
-
-}
-
 static enum drm_connector_status
 intel_dsi_detect(struct drm_connector *connector, bool force)
 {
@@ -1155,9 +1155,7 @@
 	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
 			 NULL);
 
-	/* XXX: very likely not all of these are needed */
 	intel_encoder->compute_config = intel_dsi_compute_config;
-	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
 	intel_encoder->enable = intel_dsi_enable_nop;
 	intel_encoder->disable = intel_dsi_pre_disable;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 11fc528..a1988a4 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -43,7 +43,7 @@
 
 static inline bool fbc_supported(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->fbc.enable_fbc != NULL;
+	return dev_priv->fbc.activate != NULL;
 }
 
 static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
@@ -51,6 +51,11 @@
 	return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
 }
 
+static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
+{
+	return INTEL_INFO(dev_priv)->gen < 4;
+}
+
 /*
  * On platforms where the CRTC's x:0/y:0 coordinates don't match the
  * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -64,11 +69,51 @@
 	return crtc->base.y - crtc->adjusted_y;
 }
 
-static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+					    int *width, int *height)
+{
+	struct intel_plane_state *plane_state =
+			to_intel_plane_state(crtc->base.primary->state);
+	int w, h;
+
+	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
+		w = drm_rect_height(&plane_state->src) >> 16;
+		h = drm_rect_width(&plane_state->src) >> 16;
+	} else {
+		w = drm_rect_width(&plane_state->src) >> 16;
+		h = drm_rect_height(&plane_state->src) >> 16;
+	}
+
+	if (width)
+		*width = w;
+	if (height)
+		*height = h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
+					struct drm_framebuffer *fb)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	int lines;
+
+	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		lines = min(lines, 2048);
+
+	/* Hardware needs the full buffer stride, not just the active area. */
+	return lines * fb->pitches[0];
+}
+
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 fbc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	fbc_ctl = I915_READ(FBC_CONTROL);
@@ -83,11 +128,9 @@
 		DRM_DEBUG_KMS("FBC idle timed out\n");
 		return;
 	}
-
-	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_fbc_enable(struct intel_crtc *crtc)
+static void i8xx_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -96,10 +139,10 @@
 	int i;
 	u32 fbc_ctl;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	/* Note: fbc.threshold == 1 for i8xx */
-	cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
+	cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
 	if (fb->pitches[0] < cfb_pitch)
 		cfb_pitch = fb->pitches[0];
 
@@ -132,24 +175,21 @@
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
-
-	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
 }
 
-static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_enable(struct intel_crtc *crtc)
+static void g4x_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
@@ -162,27 +202,23 @@
 
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
+static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
 
-static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
@@ -194,7 +230,7 @@
 	POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_enable(struct intel_crtc *crtc)
+static void ilk_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -203,7 +239,7 @@
 	int threshold = dev_priv->fbc.threshold;
 	unsigned int y_offset;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
@@ -238,32 +274,28 @@
 	}
 
 	intel_fbc_recompress(dev_priv);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
+static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
 
 	/* Disable compression */
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
 
-static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_enable(struct intel_crtc *crtc)
+static void gen7_fbc_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -271,7 +303,7 @@
 	u32 dpfc_ctl;
 	int threshold = dev_priv->fbc.threshold;
 
-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;
 
 	dpfc_ctl = 0;
 	if (IS_IVYBRIDGE(dev_priv))
@@ -317,103 +349,41 @@
 	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
 
 	intel_fbc_recompress(dev_priv);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
 /**
- * intel_fbc_enabled - Is FBC enabled?
+ * intel_fbc_is_active - Is FBC active?
  * @dev_priv: i915 device instance
  *
  * This function is used to verify the current state of FBC.
  * FIXME: This should be tracked in the plane config eventually
  *        instead of queried at runtime for most callers.
  */
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->fbc.enabled;
+	return dev_priv->fbc.active;
 }
 
-static void intel_fbc_enable(struct intel_crtc *crtc,
-			     const struct drm_framebuffer *fb)
+static void intel_fbc_activate(const struct drm_framebuffer *fb)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = fb->dev->dev_private;
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
 
-	dev_priv->fbc.enable_fbc(crtc);
+	dev_priv->fbc.activate(crtc);
 
-	dev_priv->fbc.crtc = crtc;
 	dev_priv->fbc.fb_id = fb->base.id;
 	dev_priv->fbc.y = crtc->base.y;
 }
 
 static void intel_fbc_work_fn(struct work_struct *__work)
 {
-	struct intel_fbc_work *work =
-		container_of(to_delayed_work(__work),
-			     struct intel_fbc_work, work);
-	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
-	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
+	struct drm_i915_private *dev_priv =
+		container_of(__work, struct drm_i915_private, fbc.work.work);
+	struct intel_fbc_work *work = &dev_priv->fbc.work;
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
+	int delay_ms = 50;
 
-	mutex_lock(&dev_priv->fbc.lock);
-	if (work == dev_priv->fbc.fbc_work) {
-		/* Double check that we haven't switched fb without cancelling
-		 * the prior work.
-		 */
-		if (crtc_fb == work->fb)
-			intel_fbc_enable(work->crtc, work->fb);
-
-		dev_priv->fbc.fbc_work = NULL;
-	}
-	mutex_unlock(&dev_priv->fbc.lock);
-
-	kfree(work);
-}
-
-static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
-{
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
-	if (dev_priv->fbc.fbc_work == NULL)
-		return;
-
-	/* Synchronisation is provided by struct_mutex and checking of
-	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
-	 * entirely asynchronously.
-	 */
-	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
-		/* tasklet was killed before being run, clean up */
-		kfree(dev_priv->fbc.fbc_work);
-
-	/* Mark the work as no longer wanted so that if it does
-	 * wake-up (because the work was already running and waiting
-	 * for our mutex), it will discover that is no longer
-	 * necessary to run.
-	 */
-	dev_priv->fbc.fbc_work = NULL;
-}
-
-static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
-{
-	struct intel_fbc_work *work;
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
-	intel_fbc_cancel_work(dev_priv);
-
-	work = kzalloc(sizeof(*work), GFP_KERNEL);
-	if (work == NULL) {
-		DRM_ERROR("Failed to allocate FBC work structure\n");
-		intel_fbc_enable(crtc, crtc->base.primary->fb);
-		return;
-	}
-
-	work->crtc = crtc;
-	work->fb = crtc->base.primary->fb;
-	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
-	dev_priv->fbc.fbc_work = work;
-
+retry:
 	/* Delay the actual enabling to let pageflipping cease and the
 	 * display to settle before starting the compression. Note that
 	 * this delay also serves a second purpose: it allows for a
@@ -427,43 +397,71 @@
 	 *
 	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
 	 */
-	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+	wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
+
+	mutex_lock(&dev_priv->fbc.lock);
+
+	/* Were we cancelled? */
+	if (!work->scheduled)
+		goto out;
+
+	/* Were we delayed again while this function was sleeping? */
+	if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
+		       jiffies)) {
+		mutex_unlock(&dev_priv->fbc.lock);
+		goto retry;
+	}
+
+	if (crtc->base.primary->fb == work->fb)
+		intel_fbc_activate(work->fb);
+
+	work->scheduled = false;
+
+out:
+	mutex_unlock(&dev_priv->fbc.lock);
 }
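The reworked worker is effectively a debounce: every schedule refreshes enable_jiffies, and activation happens only after a full 50ms quiet period with no further schedules. Its control flow, condensed:

retry:
	wait_remaining_ms_from_jiffies(work->enable_jiffies, 50);
	mutex_lock(&dev_priv->fbc.lock);
	if (!work->scheduled)			/* cancelled while we slept */
		goto out;
	if (time_after(work->enable_jiffies + msecs_to_jiffies(50), jiffies)) {
		mutex_unlock(&dev_priv->fbc.lock);
		goto retry;			/* rescheduled meanwhile */
	}
	/* quiet for the full 50ms: safe to activate */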
 
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+	dev_priv->fbc.work.scheduled = false;
+}
+
+static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc_work *work = &dev_priv->fbc.work;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+	/* It is useless to call intel_fbc_cancel_work() in this function since
+	 * we're not releasing fbc.lock, so the work function won't get a chance
+	 * to grab the lock and discover that it was cancelled. So we just
+	 * update the expected jiffy count. */
+	work->fb = crtc->base.primary->fb;
+	work->scheduled = true;
+	work->enable_jiffies = jiffies;
+
+	schedule_work(&work->work);
+}
+
+static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	intel_fbc_cancel_work(dev_priv);
 
-	if (dev_priv->fbc.enabled)
-		dev_priv->fbc.disable_fbc(dev_priv);
-	dev_priv->fbc.crtc = NULL;
-}
-
-/**
- * intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This function disables FBC.
- */
-void intel_fbc_disable(struct drm_i915_private *dev_priv)
-{
-	if (!fbc_supported(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->fbc.lock);
-	__intel_fbc_disable(dev_priv);
-	mutex_unlock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.active)
+		dev_priv->fbc.deactivate(dev_priv);
 }
 
 /*
- * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
  * @crtc: the CRTC
  *
- * This function disables FBC if it's associated with the provided CRTC.
+ * This function deactivates FBC if it's associated with the provided CRTC.
  */
-void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+void intel_fbc_deactivate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
@@ -472,7 +470,7 @@
 
 	mutex_lock(&dev_priv->fbc.lock);
 	if (dev_priv->fbc.crtc == crtc)
-		__intel_fbc_disable(dev_priv);
+		__intel_fbc_deactivate(dev_priv);
 	mutex_unlock(&dev_priv->fbc.lock);
 }
 
@@ -486,13 +484,21 @@
 	DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
 }
 
-static bool crtc_is_valid(struct intel_crtc *crtc)
+static bool crtc_can_fbc(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
 	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
 		return false;
 
+	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+		return false;
+
+	return true;
+}
+
+static bool crtc_is_valid(struct intel_crtc *crtc)
+{
 	if (!intel_crtc_active(&crtc->base))
 		return false;
 
@@ -502,24 +508,6 @@
 	return true;
 }
 
-static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
-{
-	struct drm_crtc *crtc = NULL, *tmp_crtc;
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe) {
-		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
-		if (crtc_is_valid(to_intel_crtc(tmp_crtc)))
-			crtc = tmp_crtc;
-	}
-
-	if (!crtc)
-		return NULL;
-
-	return crtc;
-}
-
 static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
 {
 	enum pipe pipe;
@@ -590,11 +578,17 @@
 	}
 }
 
-static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
-			       int fb_cpp)
+static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->state->fb;
 	struct drm_mm_node *uninitialized_var(compressed_llb);
-	int ret;
+	int size, fb_cpp, ret;
+
+	WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
+
+	size = intel_fbc_calculate_cfb_size(crtc, fb);
+	fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
 	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
 					 size, fb_cpp);
@@ -629,8 +623,6 @@
 			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
-	dev_priv->fbc.uncompressed_size = size;
-
 	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
 		      dev_priv->fbc.compressed_fb.size,
 		      dev_priv->fbc.threshold);
@@ -647,18 +639,15 @@
 
 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
-	if (dev_priv->fbc.uncompressed_size == 0)
-		return;
-
-	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+	if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
+		i915_gem_stolen_remove_node(dev_priv,
+					    &dev_priv->fbc.compressed_fb);
 
 	if (dev_priv->fbc.compressed_llb) {
 		i915_gem_stolen_remove_node(dev_priv,
 					    dev_priv->fbc.compressed_llb);
 		kfree(dev_priv->fbc.compressed_llb);
 	}
-
-	dev_priv->fbc.uncompressed_size = 0;
 }
 
 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
@@ -671,64 +660,6 @@
 	mutex_unlock(&dev_priv->fbc.lock);
 }
 
-/*
- * For SKL+, the plane source size used by the hardware is based on the value we
- * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
- * we wrote to PIPESRC.
- */
-static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
-					    int *width, int *height)
-{
-	struct intel_plane_state *plane_state =
-			to_intel_plane_state(crtc->base.primary->state);
-	int w, h;
-
-	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
-		w = drm_rect_height(&plane_state->src) >> 16;
-		h = drm_rect_width(&plane_state->src) >> 16;
-	} else {
-		w = drm_rect_width(&plane_state->src) >> 16;
-		h = drm_rect_height(&plane_state->src) >> 16;
-	}
-
-	if (width)
-		*width = w;
-	if (height)
-		*height = h;
-}
-
-static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	int lines;
-
-	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
-	if (INTEL_INFO(dev_priv)->gen >= 7)
-		lines = min(lines, 2048);
-
-	/* Hardware needs the full buffer stride, not just the active area. */
-	return lines * fb->pitches[0];
-}
-
-static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	int size, cpp;
-
-	size = intel_fbc_calculate_cfb_size(crtc);
-	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-
-	if (size <= dev_priv->fbc.uncompressed_size)
-		return 0;
-
-	/* Release any current block */
-	__intel_fbc_cleanup_cfb(dev_priv);
-
-	return intel_fbc_alloc_cfb(dev_priv, size, cpp);
-}
-
 static bool stride_is_valid(struct drm_i915_private *dev_priv,
 			    unsigned int stride)
 {
@@ -803,47 +734,34 @@
 }
 
 /**
- * __intel_fbc_update - enable/disable FBC as needed, unlocked
- * @dev_priv: i915 device instance
+ * __intel_fbc_update - activate/deactivate FBC as needed, unlocked
+ * @crtc: the CRTC that triggered the update
  *
- * This function completely reevaluates the status of FBC, then enables,
- * disables or maintains it on the same state.
+ * This function completely reevaluates the status of FBC, then activates,
+ * deactivates or maintains it on the same state.
  */
-static void __intel_fbc_update(struct drm_i915_private *dev_priv)
+static void __intel_fbc_update(struct intel_crtc *crtc)
 {
-	struct drm_crtc *drm_crtc = NULL;
-	struct intel_crtc *crtc;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct drm_framebuffer *fb;
 	struct drm_i915_gem_object *obj;
 	const struct drm_display_mode *adjusted_mode;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
-	if (intel_vgpu_active(dev_priv->dev))
-		i915.enable_fbc = 0;
-
-	if (i915.enable_fbc < 0) {
-		set_no_fbc_reason(dev_priv, "disabled per chip default");
-		goto out_disable;
-	}
-
-	if (!i915.enable_fbc) {
-		set_no_fbc_reason(dev_priv, "disabled per module param");
-		goto out_disable;
-	}
-
-	drm_crtc = intel_fbc_find_crtc(dev_priv);
-	if (!drm_crtc) {
-		set_no_fbc_reason(dev_priv, "no output");
-		goto out_disable;
-	}
-
 	if (!multiple_pipes_ok(dev_priv)) {
 		set_no_fbc_reason(dev_priv, "more than one pipe active");
 		goto out_disable;
 	}
 
-	crtc = to_intel_crtc(drm_crtc);
+	if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
+		return;
+
+	if (!crtc_is_valid(crtc)) {
+		set_no_fbc_reason(dev_priv, "no output");
+		goto out_disable;
+	}
+
 	fb = crtc->base.primary->fb;
 	obj = intel_fb_obj(fb);
 	adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -859,12 +777,6 @@
 		goto out_disable;
 	}
 
-	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
-	    crtc->plane != PLANE_A) {
-		set_no_fbc_reason(dev_priv, "FBC unsupported on plane");
-		goto out_disable;
-	}
-
 	/* The use of a CPU fence is mandatory in order to detect writes
 	 * by the CPU to the scanout and trigger updates to the FBC.
 	 */
@@ -897,8 +809,19 @@
 		goto out_disable;
 	}
 
-	if (intel_fbc_setup_cfb(crtc)) {
-		set_no_fbc_reason(dev_priv, "not enough stolen memory");
+	/* It is possible for the required CFB size to change without a
+	 * crtc->disable + crtc->enable since it is possible to change the
+	 * stride without triggering a full modeset. Since we try to
+	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
+	 * if this happens, but if we exceed the current CFB size we'll have to
+	 * disable FBC. Notice that it would be possible to disable FBC, wait
+	 * for a frame, free the stolen node, then try to reenable FBC in case
+	 * we didn't get any invalidate/deactivate calls, but this would require
+	 * a lot of tracking just for a specific case. If we conclude it's an
+	 * important case, we can implement it later. */
+	if (intel_fbc_calculate_cfb_size(crtc, fb) >
+	    dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
+		set_no_fbc_reason(dev_priv, "CFB requirements changed");
 		goto out_disable;
 	}
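
The over-allocation slack mentioned in the comment above is expressed through fbc.threshold. A minimal sketch of the fit check, assuming the i915 field and helper names visible in this hunk (illustrative only, not part of the patch):

	static bool cfb_still_fits(struct drm_i915_private *dev_priv,
				   struct intel_crtc *crtc,
				   struct drm_framebuffer *fb)
	{
		/* What the current stride/height demands... */
		int required = intel_fbc_calculate_cfb_size(crtc, fb);
		/* ...versus what the stolen-memory node can cover; a
		 * threshold > 1 means a reduced compression limit was
		 * accepted at allocation time, stretching the node. */
		int available = dev_priv->fbc.compressed_fb.size *
				dev_priv->fbc.threshold;

		return required <= available;
	}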
 
@@ -909,10 +832,11 @@
 	 */
 	if (dev_priv->fbc.crtc == crtc &&
 	    dev_priv->fbc.fb_id == fb->base.id &&
-	    dev_priv->fbc.y == crtc->base.y)
+	    dev_priv->fbc.y == crtc->base.y &&
+	    dev_priv->fbc.active)
 		return;
 
-	if (intel_fbc_enabled(dev_priv)) {
+	if (intel_fbc_is_active(dev_priv)) {
 		/* We update FBC along two paths, after changing fb/crtc
 		 * configuration (modeswitching) and after page-flipping
 		 * finishes. For the latter, we know that not only did
@@ -936,36 +860,37 @@
 		 * disabling paths we do need to wait for a vblank at
 		 * some point. And we wait before enabling FBC anyway.
 		 */
-		DRM_DEBUG_KMS("disabling active FBC for update\n");
-		__intel_fbc_disable(dev_priv);
+		DRM_DEBUG_KMS("deactivating FBC for update\n");
+		__intel_fbc_deactivate(dev_priv);
 	}
 
-	intel_fbc_schedule_enable(crtc);
+	intel_fbc_schedule_activation(crtc);
 	dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
 	return;
 
 out_disable:
 	/* Multiple disables should be harmless */
-	if (intel_fbc_enabled(dev_priv)) {
-		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-		__intel_fbc_disable(dev_priv);
+	if (intel_fbc_is_active(dev_priv)) {
+		DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
+		__intel_fbc_deactivate(dev_priv);
 	}
-	__intel_fbc_cleanup_cfb(dev_priv);
 }
 
 /*
- * intel_fbc_update - enable/disable FBC as needed
- * @dev_priv: i915 device instance
+ * intel_fbc_update - activate/deactivate FBC as needed
+ * @crtc: the CRTC that triggered the update
  *
- * This function reevaluates the overall state and enables or disables FBC.
+ * This function reevaluates the overall state and activates or deactivates FBC.
  */
-void intel_fbc_update(struct drm_i915_private *dev_priv)
+void intel_fbc_update(struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
 	if (!fbc_supported(dev_priv))
 		return;
 
 	mutex_lock(&dev_priv->fbc.lock);
-	__intel_fbc_update(dev_priv);
+	__intel_fbc_update(crtc);
 	mutex_unlock(&dev_priv->fbc.lock);
 }
 
@@ -985,16 +910,13 @@
 
 	if (dev_priv->fbc.enabled)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
-	else if (dev_priv->fbc.fbc_work)
-		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
-					dev_priv->fbc.fbc_work->crtc->pipe);
 	else
 		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
 	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
 	if (dev_priv->fbc.busy_bits)
-		__intel_fbc_disable(dev_priv);
+		__intel_fbc_deactivate(dev_priv);
 
 	mutex_unlock(&dev_priv->fbc.lock);
 }
@@ -1012,15 +934,140 @@
 
 	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
-	if (!dev_priv->fbc.busy_bits) {
-		__intel_fbc_disable(dev_priv);
-		__intel_fbc_update(dev_priv);
+	if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
+		if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
+			intel_fbc_recompress(dev_priv);
+		} else {
+			__intel_fbc_deactivate(dev_priv);
+			__intel_fbc_update(dev_priv->fbc.crtc);
+		}
 	}
 
 	mutex_unlock(&dev_priv->fbc.lock);
 }
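
Taken together with the invalidate hook above it, this flush path implements a small dirty-tracking protocol. A condensed sketch (illustrative only; the hypothetical fbc_frontbuffer_event() merges both hooks and drops the locking):

	/*
	 * Sketch: frontbuffer dirty tracking as used by FBC.
	 * invalidate: mark the FBC plane busy and stop compressing.
	 * flush: clear the busy bits; once idle, either recompress in
	 * place (CPU writes) or deactivate and re-run the update (flips,
	 * which replace the scanout buffer entirely).
	 */
	void fbc_frontbuffer_event(struct drm_i915_private *dev_priv,
				   unsigned int bits, bool flush,
				   enum fb_op_origin origin)
	{
		if (!flush) {
			dev_priv->fbc.busy_bits |= bits;
			if (dev_priv->fbc.busy_bits)
				__intel_fbc_deactivate(dev_priv);
			return;
		}

		dev_priv->fbc.busy_bits &= ~bits;
		if (dev_priv->fbc.busy_bits || !dev_priv->fbc.enabled)
			return;

		if (origin != ORIGIN_FLIP && dev_priv->fbc.active)
			intel_fbc_recompress(dev_priv);
		else {
			__intel_fbc_deactivate(dev_priv);
			__intel_fbc_update(dev_priv->fbc.crtc);
		}
	}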
 
 /**
+ * intel_fbc_enable: tries to enable FBC on the CRTC
+ * @crtc: the CRTC
+ *
+ * This function checks if it's possible to enable FBC on the given CRTC,
+ * then enables it. Notice that it doesn't activate FBC.
+ */
+void intel_fbc_enable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+
+	if (dev_priv->fbc.enabled) {
+		WARN_ON(dev_priv->fbc.crtc == crtc);
+		goto out;
+	}
+
+	WARN_ON(dev_priv->fbc.active);
+	WARN_ON(dev_priv->fbc.crtc != NULL);
+
+	if (intel_vgpu_active(dev_priv->dev)) {
+		set_no_fbc_reason(dev_priv, "VGPU is active");
+		goto out;
+	}
+
+	if (i915.enable_fbc < 0) {
+		set_no_fbc_reason(dev_priv, "disabled per chip default");
+		goto out;
+	}
+
+	if (!i915.enable_fbc) {
+		set_no_fbc_reason(dev_priv, "disabled per module param");
+		goto out;
+	}
+
+	if (!crtc_can_fbc(crtc)) {
+		set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
+		goto out;
+	}
+
+	if (intel_fbc_alloc_cfb(crtc)) {
+		set_no_fbc_reason(dev_priv, "not enough stolen memory");
+		goto out;
+	}
+
+	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+	dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet";
+
+	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.crtc = crtc;
+out:
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
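The kerneldoc above draws the new line between enabling and activating FBC. A rough sketch of the intended call sequence across a CRTC's life (the annotations are illustrative, not from the patch):

	/*
	 * Sketch of the intended call sequence:
	 *
	 *	intel_fbc_enable(crtc);       modeset enable: pick the CRTC,
	 *	                              allocate the CFB
	 *	intel_fbc_update(crtc);       flips/updates: may schedule
	 *	                              activation or deactivate
	 *	intel_fbc_invalidate(...);    CPU dirtying: deactivate hw
	 *	intel_fbc_flush(...);         dirtying done: reactivate
	 *	intel_fbc_disable_crtc(crtc); modeset disable: free the CFB
	 *
	 * "enabled" can thus stay true across many activate/deactivate
	 * cycles; only disable tears the allocation down.
	 */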
+/**
+ * __intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This is the low level function that actually disables FBC. Callers should
+ * grab the FBC lock.
+ */
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+	WARN_ON(!dev_priv->fbc.enabled);
+	WARN_ON(dev_priv->fbc.active);
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+
+	__intel_fbc_cleanup_cfb(dev_priv);
+
+	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.crtc = NULL;
+}
+
+/**
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.crtc == crtc) {
+		WARN_ON(!dev_priv->fbc.enabled);
+		WARN_ON(dev_priv->fbc.active);
+		__intel_fbc_disable(dev_priv);
+	}
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+ * intel_fbc_disable - globally disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC regardless of which CRTC is associated with it.
+ */
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.enabled)
+		__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
  * intel_fbc_init - Initialize FBC
  * @dev_priv: the i915 device
  *
@@ -1030,8 +1077,11 @@
 {
 	enum pipe pipe;
 
+	INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
 	mutex_init(&dev_priv->fbc.lock);
 	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
+	dev_priv->fbc.work.scheduled = false;
 
 	if (!HAS_FBC(dev_priv)) {
 		dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
@@ -1047,29 +1097,29 @@
 	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 7) {
-		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->fbc.enable_fbc = gen7_fbc_enable;
-		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.is_active = ilk_fbc_is_active;
+		dev_priv->fbc.activate = gen7_fbc_activate;
+		dev_priv->fbc.deactivate = ilk_fbc_deactivate;
 	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
-		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->fbc.enable_fbc = ilk_fbc_enable;
-		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.is_active = ilk_fbc_is_active;
+		dev_priv->fbc.activate = ilk_fbc_activate;
+		dev_priv->fbc.deactivate = ilk_fbc_deactivate;
 	} else if (IS_GM45(dev_priv)) {
-		dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
-		dev_priv->fbc.enable_fbc = g4x_fbc_enable;
-		dev_priv->fbc.disable_fbc = g4x_fbc_disable;
+		dev_priv->fbc.is_active = g4x_fbc_is_active;
+		dev_priv->fbc.activate = g4x_fbc_activate;
+		dev_priv->fbc.deactivate = g4x_fbc_deactivate;
 	} else {
-		dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
-		dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
-		dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
+		dev_priv->fbc.is_active = i8xx_fbc_is_active;
+		dev_priv->fbc.activate = i8xx_fbc_activate;
+		dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
 
 		/* This value was pulled out of someone's hat */
 		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 	}
 
 	/* We still don't have any sort of hardware state readout for FBC, so
-	 * disable it in case the BIOS enabled it to make sure software matches
-	 * the hardware state. */
-	if (dev_priv->fbc.fbc_enabled(dev_priv))
-		dev_priv->fbc.disable_fbc(dev_priv);
+	 * deactivate it in case the BIOS activated it to make sure software
+	 * matches the hardware state. */
+	if (dev_priv->fbc.is_active(dev_priv))
+		dev_priv->fbc.deactivate(dev_priv);
 }
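With the vfuncs renamed, callers stay generation-agnostic and simply dispatch through the pointers wired up here. A hypothetical caller, just to show the shape (intel_fbc_hw_activate() is not a function from this patch):

	/* Sketch: all per-generation differences sit behind
	 * dev_priv->fbc.{is_active,activate,deactivate}. */
	static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
	{
		if (!dev_priv->fbc.is_active(dev_priv))
			dev_priv->fbc.activate(dev_priv);
	}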
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 7ae182d..bda5266 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -128,9 +128,9 @@
 					  DE_PIPEB_FIFO_UNDERRUN;
 
 	if (enable)
-		ironlake_enable_display_irq(dev_priv, bit);
+		ilk_enable_display_irq(dev_priv, bit);
 	else
-		ironlake_disable_display_irq(dev_priv, bit);
+		ilk_disable_display_irq(dev_priv, bit);
 }
 
 static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
@@ -161,9 +161,9 @@
 		if (!ivb_can_enable_err_int(dev))
 			return;
 
-		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+		ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
 	} else {
-		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+		ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
 		if (old &&
 		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -178,14 +178,10 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	assert_spin_locked(&dev_priv->irq_lock);
-
 	if (enable)
-		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+		bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
 	else
-		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
-	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
-	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+		bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
 }
 
 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 5ba5866..8229522 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -42,8 +42,6 @@
 
 	uint32_t wq_offset;
 	uint32_t wq_size;
-
-	spinlock_t wq_lock;		/* Protects all data below	*/
 	uint32_t wq_tail;
 
 	/* GuC submission statistics & status */
@@ -95,8 +93,6 @@
 
 	struct i915_guc_client *execbuf_client;
 
-	spinlock_t host2guc_lock;	/* Protects all data below	*/
-
 	DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
 	uint32_t db_cacheline;		/* Cyclic counter mod pagesize	*/
 
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6dd1e09..00d065f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -169,10 +169,10 @@
 	POSTING_READ(VIDEO_DIP_CTL);
 }
 
-static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
+static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	u32 val = I915_READ(VIDEO_DIP_CTL);
 
@@ -225,13 +225,13 @@
 	POSTING_READ(reg);
 }
 
-static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
+static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+	i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
 	u32 val = I915_READ(reg);
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -287,12 +287,12 @@
 	POSTING_READ(reg);
 }
 
-static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
+static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	u32 val = I915_READ(TVIDEO_DIP_CTL(intel_crtc->pipe));
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+	u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return false;
@@ -341,13 +341,13 @@
 	POSTING_READ(reg);
 }
 
-static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
+static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(intel_crtc->pipe));
+	enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+	u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return false;
@@ -398,12 +398,11 @@
 	POSTING_READ(ctl_reg);
 }
 
-static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
+static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder));
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
 
 	return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
 		      VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -927,7 +926,7 @@
 	if (tmp & HDMI_MODE_SELECT_HDMI)
 		pipe_config->has_hdmi_sink = true;
 
-	if (intel_hdmi->infoframe_enabled(&encoder->base))
+	if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
 		pipe_config->has_infoframe = true;
 
 	if (tmp & SDVO_AUDIO_ENABLE)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1110c83..e26e22a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -472,9 +472,7 @@
 }
 
 static int
-gmbus_xfer(struct i2c_adapter *adapter,
-	   struct i2c_msg *msgs,
-	   int num)
+do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 {
 	struct intel_gmbus *bus = container_of(adapter,
 					       struct intel_gmbus,
@@ -483,14 +481,6 @@
 	int i = 0, inc, try = 0;
 	int ret = 0;
 
-	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
-	mutex_lock(&dev_priv->gmbus_mutex);
-
-	if (bus->force_bit) {
-		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
-		goto out;
-	}
-
 retry:
 	I915_WRITE(GMBUS0, bus->reg0);
 
@@ -505,17 +495,13 @@
 			ret = gmbus_xfer_write(dev_priv, &msgs[i]);
 		}
 
+		if (!ret)
+			ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+						   GMBUS_HW_WAIT_EN);
 		if (ret == -ETIMEDOUT)
 			goto timeout;
-		if (ret == -ENXIO)
+		else if (ret)
 			goto clear_err;
-
-		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
-					   GMBUS_HW_WAIT_EN);
-		if (ret == -ENXIO)
-			goto clear_err;
-		if (ret)
-			goto timeout;
 	}
 
 	/* Generate a STOP condition on the bus. Note that gmbus can't generate
@@ -589,13 +575,34 @@
 		 bus->adapter.name, bus->reg0 & 0xff);
 	I915_WRITE(GMBUS0, 0);
 
-	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+	/*
+	 * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
+	 * instead. Use EAGAIN to have the i2c core retry.
+	 */
 	bus->force_bit = 1;
-	ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+	ret = -EAGAIN;
 
 out:
-	mutex_unlock(&dev_priv->gmbus_mutex);
+	return ret;
+}
 
+static int
+gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+{
+	struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	int ret;
+
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+	mutex_lock(&dev_priv->gmbus_mutex);
+
+	if (bus->force_bit)
+		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+	else
+		ret = do_gmbus_xfer(adapter, msgs, num);
+
+	mutex_unlock(&dev_priv->gmbus_mutex);
 	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
 	return ret;
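
The key to this refactor is the -EAGAIN return from do_gmbus_xfer(): the i2c core treats it as a transient failure and retries the transfer, which now lands in the bit-banging branch because force_bit was set on the way out. A simplified model of the core's retry behaviour (a sketch; the real loop in i2c-core also enforces adapter->timeout):

	int ret, try;

	for (ret = 0, try = 0; try <= adapter->retries; try++) {
		ret = adapter->algo->master_xfer(adapter, msgs, num);
		if (ret != -EAGAIN)
			break;	/* success, or a hard error */
		/* the failed GMBUS pass set bus->force_bit, so this
		 * retry goes down the bit-banging branch instead */
	}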
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a24df35..ae808b6 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1264,6 +1264,14 @@
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
 /*
+ * BXT: PWM clock frequency = 19.2 MHz.
+ */
+static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+	return KHz(19200) / pwm_freq_hz;
+}
+
+/*
  * SPT: This value represents the period of the PWM stream in clock periods
  * multiplied by 16 (default increment) or 128 (alternate increment selected in
  * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
@@ -1300,7 +1308,7 @@
 	else
 		mul = 128;
 
-	if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+	if (HAS_PCH_LPT_H(dev_priv))
 		clock = MHz(135); /* LPT:H */
 	else
 		clock = MHz(24); /* LPT:LP */
@@ -1335,22 +1343,28 @@
 	int clock;
 
 	if (IS_PINEVIEW(dev))
-		clock = intel_hrawclk(dev);
+		clock = MHz(intel_hrawclk(dev));
 	else
-		clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+		clock = 1000 * dev_priv->cdclk_freq;
 
 	return clock / (pwm_freq_hz * 32);
 }
 
 /*
  * Gen4: This value represents the period of the PWM stream in display core
- * clocks multiplied by 128.
+ * clocks ([DevCTG] HRAW clocks) multiplied by 128.
  */
 static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+	int clock;
+
+	if (IS_G4X(dev_priv))
+		clock = MHz(intel_hrawclk(dev));
+	else
+		clock = 1000 * dev_priv->cdclk_freq;
 
 	return clock / (pwm_freq_hz * 128);
 }
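
All of these hz_to_pwm helpers compute the same quantity: the PWM period in source-clock ticks, divided by the hardware increment. A worked example for a 200 Hz target, the default adopted later in this patch (the 200 MHz cdclk figure is an assumption for illustration):

	/*
	 * BXT:  19.2 MHz PWM clock, raw period in the register:
	 *       19,200,000 / 200 = 96,000
	 *
	 * i965: display core clock (assume 200 MHz) with a fixed
	 *       increment of 128:
	 *       200,000,000 / (200 * 128) = 7,812
	 */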
@@ -1385,14 +1399,18 @@
 	u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
 	u32 pwm;
 
-	if (!pwm_freq_hz) {
-		DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
+	if (!panel->backlight.hz_to_pwm) {
+		DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
 		return 0;
 	}
 
-	if (!panel->backlight.hz_to_pwm) {
-		DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
-		return 0;
+	if (pwm_freq_hz) {
+		DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
+			      pwm_freq_hz);
+	} else {
+		pwm_freq_hz = 200;
+		DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
+			      pwm_freq_hz);
 	}
 
 	pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
@@ -1401,8 +1419,6 @@
 		return 0;
 	}
 
-	DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
-
 	return pwm;
 }
 
@@ -1750,6 +1766,7 @@
 		panel->backlight.disable = bxt_disable_backlight;
 		panel->backlight.set = bxt_set_backlight;
 		panel->backlight.get = bxt_get_backlight;
+		panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
 	} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
 		panel->backlight.setup = lpt_setup_backlight;
 		panel->backlight.enable = lpt_enable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 96f45d7..ee05ce8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -66,6 +66,14 @@
 	 */
 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
 		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
+
+	/*
+	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
+	 * to stay fully on.
+	 */
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+			   PWM1_GATING_DIS | PWM2_GATING_DIS);
 }
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -2422,7 +2430,7 @@
 	 * enabled sometime later.
 	 */
 	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
-	    intel_fbc_enabled(dev_priv)) {
+	    intel_fbc_is_active(dev_priv)) {
 		for (level = 2; level <= max_level; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
 
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index bc5ea2a..b6609e6 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -191,9 +191,6 @@
 
 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-			   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
-
 	/* Enable AUX frame sync at sink */
 	if (dev_priv->psr.aux_frame_sync)
 		drm_dp_dpcd_writeb(&intel_dp->aux,
@@ -414,9 +411,14 @@
 				skl_psr_setup_su_vsc(intel_dp);
 		}
 
-		/* Avoid continuous PSR exit by masking memup and hpd */
+		/*
+		 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
+	 * Also mask LPSP to avoid a dependency on other drivers that
+	 * might block runtime_pm, besides preventing other hw tracking
+	 * issues, now that we can rely on frontbuffer tracking.
+		 */
 		I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD);
+			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
 		/* Enable PSR on the panel */
 		hsw_psr_enable_sink(intel_dp);
@@ -522,11 +524,15 @@
 		return;
 	}
 
+	/* Disable PSR on Source */
 	if (HAS_DDI(dev))
 		hsw_psr_disable(intel_dp);
 	else
 		vlv_psr_disable(intel_dp);
 
+	/* Disable PSR on Sink */
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
 	dev_priv->psr.enabled = NULL;
 	mutex_unlock(&dev_priv->psr.lock);
 
@@ -737,25 +743,9 @@
 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	if (HAS_DDI(dev)) {
-		/*
-		 * By definition every flush should mean invalidate + flush,
-		 * however on core platforms let's minimize the
-		 * disable/re-enable so we can avoid the invalidate when flip
-		 * originated the flush.
-		 */
-		if (frontbuffer_bits && origin != ORIGIN_FLIP)
-			intel_psr_exit(dev);
-	} else {
-		/*
-		 * On Valleyview and Cherryview we don't use hardware tracking
-		 * so any plane updates or cursor moves don't result in a PSR
-		 * invalidating. Which means we need to manually fake this in
-		 * software for all flushes.
-		 */
-		if (frontbuffer_bits)
-			intel_psr_exit(dev);
-	}
+	/* By definition flush = invalidate + flush */
+	if (frontbuffer_bits)
+		intel_psr_exit(dev);
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		if (!work_busy(&dev_priv->psr.work.work))
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index afca6c9..2c2151f 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -65,6 +65,72 @@
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 				    int power_well_id);
 
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain)
+{
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+		return "PIPE_A";
+	case POWER_DOMAIN_PIPE_B:
+		return "PIPE_B";
+	case POWER_DOMAIN_PIPE_C:
+		return "PIPE_C";
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+		return "PIPE_A_PANEL_FITTER";
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+		return "PIPE_B_PANEL_FITTER";
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+		return "PIPE_C_PANEL_FITTER";
+	case POWER_DOMAIN_TRANSCODER_A:
+		return "TRANSCODER_A";
+	case POWER_DOMAIN_TRANSCODER_B:
+		return "TRANSCODER_B";
+	case POWER_DOMAIN_TRANSCODER_C:
+		return "TRANSCODER_C";
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return "TRANSCODER_EDP";
+	case POWER_DOMAIN_PORT_DDI_A_LANES:
+		return "PORT_DDI_A_LANES";
+	case POWER_DOMAIN_PORT_DDI_B_LANES:
+		return "PORT_DDI_B_LANES";
+	case POWER_DOMAIN_PORT_DDI_C_LANES:
+		return "PORT_DDI_C_LANES";
+	case POWER_DOMAIN_PORT_DDI_D_LANES:
+		return "PORT_DDI_D_LANES";
+	case POWER_DOMAIN_PORT_DDI_E_LANES:
+		return "PORT_DDI_E_LANES";
+	case POWER_DOMAIN_PORT_DSI:
+		return "PORT_DSI";
+	case POWER_DOMAIN_PORT_CRT:
+		return "PORT_CRT";
+	case POWER_DOMAIN_PORT_OTHER:
+		return "PORT_OTHER";
+	case POWER_DOMAIN_VGA:
+		return "VGA";
+	case POWER_DOMAIN_AUDIO:
+		return "AUDIO";
+	case POWER_DOMAIN_PLLS:
+		return "PLLS";
+	case POWER_DOMAIN_AUX_A:
+		return "AUX_A";
+	case POWER_DOMAIN_AUX_B:
+		return "AUX_B";
+	case POWER_DOMAIN_AUX_C:
+		return "AUX_C";
+	case POWER_DOMAIN_AUX_D:
+		return "AUX_D";
+	case POWER_DOMAIN_GMBUS:
+		return "GMBUS";
+	case POWER_DOMAIN_INIT:
+		return "INIT";
+	case POWER_DOMAIN_MODESET:
+		return "MODESET";
+	default:
+		MISSING_CASE(domain);
+		return "?";
+	}
+}
+
 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
 				    struct i915_power_well *power_well)
 {
@@ -1433,11 +1499,15 @@
 
 	mutex_lock(&power_domains->lock);
 
-	WARN_ON(!power_domains->domain_use_count[domain]);
+	WARN(!power_domains->domain_use_count[domain],
+	     "Use count on domain %s is already zero\n",
+	     intel_display_power_domain_str(domain));
 	power_domains->domain_use_count[domain]--;
 
 	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-		WARN_ON(!power_well->count);
+		WARN(!power_well->count,
+		     "Use count on power well %s is already zero",
+		     power_well->name);
 
 		if (!--power_well->count)
 			intel_power_well_disable(dev_priv, power_well);
@@ -1841,7 +1911,7 @@
 	if (disable_power_well >= 0)
 		return !!disable_power_well;
 
-	if (IS_SKYLAKE(dev_priv)) {
+	if (IS_BROXTON(dev_priv)) {
 		DRM_DEBUG_KMS("Disabling display power well support\n");
 		return 0;
 	}
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 35fcf6b..063825f 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -137,7 +137,7 @@
 	imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
 }
 
-static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
 	.mode_fixup = dw_hdmi_imx_encoder_mode_fixup,
 	.mode_set   = dw_hdmi_imx_encoder_mode_set,
 	.prepare    = dw_hdmi_imx_encoder_prepare,
@@ -145,7 +145,7 @@
 	.disable    = dw_hdmi_imx_encoder_disable,
 };
 
-static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
+static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 09e20ea..7be7ac8 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -39,13 +39,12 @@
 struct imx_drm_device {
 	struct drm_device			*drm;
 	struct imx_drm_crtc			*crtc[MAX_CRTC];
-	int					pipes;
+	unsigned int				pipes;
 	struct drm_fbdev_cma			*fbhelper;
 };
 
 struct imx_drm_crtc {
 	struct drm_crtc				*crtc;
-	int					pipe;
 	struct imx_drm_crtc_helper_funcs	imx_drm_helper_funcs;
 };
 
@@ -54,9 +53,9 @@
 module_param(legacyfb_depth, int, 0444);
 #endif
 
-int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
+unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
 {
-	return crtc->pipe;
+	return drm_crtc_index(crtc->crtc);
 }
 EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
 
@@ -124,19 +123,19 @@
 
 int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
 {
-	return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
+	return drm_crtc_vblank_get(imx_drm_crtc->crtc);
 }
 EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
 
 void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
 {
-	drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
+	drm_crtc_vblank_put(imx_drm_crtc->crtc);
 }
 EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
 
 void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
 {
-	drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
+	drm_crtc_handle_vblank(imx_drm_crtc->crtc);
 }
 EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
 
@@ -215,7 +214,7 @@
 	drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
 }
 
-static struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
+static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.output_poll_changed = imx_drm_output_poll_changed,
 };
@@ -356,12 +355,11 @@
 		return -ENOMEM;
 
 	imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs;
-	imx_drm_crtc->pipe = imxdrm->pipes++;
 	imx_drm_crtc->crtc = crtc;
 
 	crtc->port = port;
 
-	imxdrm->crtc[imx_drm_crtc->pipe] = imx_drm_crtc;
+	imxdrm->crtc[imxdrm->pipes++] = imx_drm_crtc;
 
 	*new_crtc = imx_drm_crtc;
 
@@ -378,7 +376,7 @@
 	return 0;
 
 err_register:
-	imxdrm->crtc[imx_drm_crtc->pipe] = NULL;
+	imxdrm->crtc[--imxdrm->pipes] = NULL;
 	kfree(imx_drm_crtc);
 	return ret;
 }
@@ -390,10 +388,11 @@
 int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc)
 {
 	struct imx_drm_device *imxdrm = imx_drm_crtc->crtc->dev->dev_private;
+	unsigned int pipe = drm_crtc_index(imx_drm_crtc->crtc);
 
 	drm_crtc_cleanup(imx_drm_crtc->crtc);
 
-	imxdrm->crtc[imx_drm_crtc->pipe] = NULL;
+	imxdrm->crtc[pipe] = NULL;
 
 	kfree(imx_drm_crtc);
 
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 83284b4..71cf6d9 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -13,7 +13,7 @@
 struct imx_drm_crtc;
 struct platform_device;
 
-int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
+unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
 
 struct imx_drm_crtc_helper_funcs {
 	int (*enable_vblank)(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index c79a61b..22ac482 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -358,23 +358,23 @@
 	drm_panel_unprepare(imx_ldb_ch->panel);
 }
 
-static struct drm_connector_funcs imx_ldb_connector_funcs = {
+static const struct drm_connector_funcs imx_ldb_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = imx_ldb_connector_detect,
 	.destroy = imx_drm_connector_destroy,
 };
 
-static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
 	.get_modes = imx_ldb_connector_get_modes,
 	.best_encoder = imx_ldb_connector_best_encoder,
 };
 
-static struct drm_encoder_funcs imx_ldb_encoder_funcs = {
+static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
 	.destroy = imx_drm_encoder_destroy,
 };
 
-static struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
 	.dpms = imx_ldb_encoder_dpms,
 	.mode_fixup = imx_ldb_encoder_mode_fixup,
 	.prepare = imx_ldb_encoder_prepare,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index e61a8fc..292349f 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -360,24 +360,24 @@
 	tve_disable(tve);
 }
 
-static struct drm_connector_funcs imx_tve_connector_funcs = {
+static const struct drm_connector_funcs imx_tve_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = imx_tve_connector_detect,
 	.destroy = imx_drm_connector_destroy,
 };
 
-static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
 	.get_modes = imx_tve_connector_get_modes,
 	.best_encoder = imx_tve_connector_best_encoder,
 	.mode_valid = imx_tve_connector_mode_valid,
 };
 
-static struct drm_encoder_funcs imx_tve_encoder_funcs = {
+static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
 	.destroy = imx_drm_encoder_destroy,
 };
 
-static struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
 	.dpms = imx_tve_encoder_dpms,
 	.mode_fixup = imx_tve_encoder_mode_fixup,
 	.prepare = imx_tve_encoder_prepare,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 4ab841e..30a5718 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -270,7 +270,7 @@
 	ipu_fb_enable(ipu_crtc);
 }
 
-static struct drm_crtc_helper_funcs ipu_helper_funcs = {
+static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.dpms = ipu_crtc_dpms,
 	.mode_fixup = ipu_crtc_mode_fixup,
 	.mode_set = ipu_crtc_mode_set,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index fcbe4d2..b74bf8e 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -148,23 +148,23 @@
 	drm_panel_unprepare(imxpd->panel);
 }
 
-static struct drm_connector_funcs imx_pd_connector_funcs = {
+static const struct drm_connector_funcs imx_pd_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = imx_pd_connector_detect,
 	.destroy = imx_drm_connector_destroy,
 };
 
-static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
 	.get_modes = imx_pd_connector_get_modes,
 	.best_encoder = imx_pd_connector_best_encoder,
 };
 
-static struct drm_encoder_funcs imx_pd_encoder_funcs = {
+static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
 	.destroy = imx_drm_encoder_destroy,
 };
 
-static struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
 	.dpms = imx_pd_encoder_dpms,
 	.mode_fixup = imx_pd_encoder_mode_fixup,
 	.prepare = imx_pd_encoder_prepare,
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 3180212..19c18b7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1684,13 +1684,13 @@
 	kfree(connector);
 }
 
-struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
 	.get_modes = mga_vga_get_modes,
 	.mode_valid = mga_vga_mode_valid,
 	.best_encoder = mga_connector_best_encoder,
 };
 
-struct drm_connector_funcs mga_vga_connector_funcs = {
+static const struct drm_connector_funcs mga_vga_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = mga_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index d9644c0..163317d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -769,7 +769,7 @@
 	kfree(tv_enc);
 }
 
-static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
+static const struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
 	.dpms = nv17_tv_dpms,
 	.mode_fixup = nv17_tv_mode_fixup,
 	.prepare = nv17_tv_prepare,
@@ -778,14 +778,14 @@
 	.detect = nv17_tv_detect,
 };
 
-static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
+static const struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
 	.get_modes = nv17_tv_get_modes,
 	.mode_valid = nv17_tv_mode_valid,
 	.create_resources = nv17_tv_create_resources,
 	.set_property = nv17_tv_set_property,
 };
 
-static struct drm_encoder_funcs nv17_tv_funcs = {
+static const struct drm_encoder_funcs nv17_tv_funcs = {
 	.destroy = nv17_tv_destroy,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1d3ee51..b3a563c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1046,10 +1046,6 @@
 		goto err_free;
 	}
 
-	err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev));
-	if (err < 0)
-		goto err_free;
-
 	drm->platformdev = pdev;
 	platform_set_drvdata(pdev, drm);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index c38a864..ee6a6d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -83,7 +83,7 @@
 	return &enc->base.base;
 }
 
-static inline struct drm_encoder_slave_funcs *
+static inline const struct drm_encoder_slave_funcs *
 get_slave_funcs(struct drm_encoder *enc)
 {
 	return to_encoder_slave(enc)->slave_funcs;
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index efb6095..6df1f2a 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -87,14 +87,11 @@
 	if (width == 0 || height == 0)
 		return NULL;
 
-	tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
-	pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+	tcm = kzalloc(sizeof(*tcm), GFP_KERNEL);
+	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
 	if (!tcm || !pvt)
 		goto error;
 
-	memset(tcm, 0, sizeof(*tcm));
-	memset(pvt, 0, sizeof(*pvt));
-
 	/* Updating the pointers to SiTA implementation APIs */
 	tcm->height = height;
 	tcm->width = width;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 7d4704b..1500ab9 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -31,6 +31,16 @@
 	  Say Y here if you want to enable support for LG4573 RGB panel.
 	  To compile this driver as a module, choose M here.
 
+config DRM_PANEL_PANASONIC_VVX10F034N00
+	tristate "Panasonic VVX10F034N00 1920x1200 video mode panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for Panasonic VVX10F034N00
+	  WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
+	  Xperia Z2 tablets.
+
 config DRM_PANEL_SAMSUNG_S6E8AA0
 	tristate "Samsung S6E8AA0 DSI video mode panel"
 	depends on OF
@@ -51,4 +61,13 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called panel-sharp-lq101r1sx01.
 
+config DRM_PANEL_SHARP_LS043T1LE01
+	tristate "Sharp LS043T1LE01 qHD video mode panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
+	  (540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard.
+
 endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d0f016d..f277eed 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,7 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
new file mode 100644
index 0000000..7f915f7
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2015 Red Hat
+ * Copyright (C) 2015 Sony Mobile Communications Inc.
+ * Author: Werner Johansson <werner.johansson@sonymobile.com>
+ *
+ * Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+/*
+ * When power is turned off to this panel, a minimum off time of 500ms has to
+ * be observed before powering back on, as there's no external reset pin. Keep
+ * track of the earliest wakeup time and delay the subsequent prepare call
+ * accordingly.
+ */
+#define MIN_POFF_MS (500)
+
+struct wuxga_nt_panel {
+	struct drm_panel base;
+	struct mipi_dsi_device *dsi;
+
+	struct backlight_device *backlight;
+	struct regulator *supply;
+
+	bool prepared;
+	bool enabled;
+
+	ktime_t earliest_wake;
+
+	const struct drm_display_mode *mode;
+};
+
+static inline struct wuxga_nt_panel *to_wuxga_nt_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct wuxga_nt_panel, base);
+}
+
+static int wuxga_nt_panel_on(struct wuxga_nt_panel *wuxga_nt)
+{
+	struct mipi_dsi_device *dsi = wuxga_nt->dsi;
+	int ret;
+
+	ret = mipi_dsi_turn_on_peripheral(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int wuxga_nt_panel_disable(struct drm_panel *panel)
+{
+	struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+
+	if (!wuxga_nt->enabled)
+		return 0;
+
+	mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
+
+	if (wuxga_nt->backlight) {
+		wuxga_nt->backlight->props.power = FB_BLANK_POWERDOWN;
+		wuxga_nt->backlight->props.state |= BL_CORE_FBBLANK;
+		backlight_update_status(wuxga_nt->backlight);
+	}
+
+	wuxga_nt->enabled = false;
+
+	return 0;
+}
+
+static int wuxga_nt_panel_unprepare(struct drm_panel *panel)
+{
+	struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+
+	if (!wuxga_nt->prepared)
+		return 0;
+
+	regulator_disable(wuxga_nt->supply);
+	wuxga_nt->earliest_wake = ktime_add_ms(ktime_get_real(), MIN_POFF_MS);
+	wuxga_nt->prepared = false;
+
+	return 0;
+}
+
+static int wuxga_nt_panel_prepare(struct drm_panel *panel)
+{
+	struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+	int ret;
+	s64 enablewait;
+
+	if (wuxga_nt->prepared)
+		return 0;
+
+	/*
+	 * If the user re-enabled the panel before the required off-time, then
+	 * we need to wait for the remaining period before re-enabling the
+	 * regulator.
+	 */
+	enablewait = ktime_ms_delta(wuxga_nt->earliest_wake, ktime_get_real());
+
+	/* Sanity check, this should never happen */
+	if (enablewait > MIN_POFF_MS)
+		enablewait = MIN_POFF_MS;
+
+	if (enablewait > 0)
+		msleep(enablewait);
+
+	ret = regulator_enable(wuxga_nt->supply);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * A minimum delay of 250ms is required after power-up until commands
+	 * can be sent.
+	 */
+	msleep(250);
+
+	ret = wuxga_nt_panel_on(wuxga_nt);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel on: %d\n", ret);
+		goto poweroff;
+	}
+
+	wuxga_nt->prepared = true;
+
+	return 0;
+
+poweroff:
+	regulator_disable(wuxga_nt->supply);
+
+	return ret;
+}
+
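unprepare stamps a wall-clock deadline and prepare sleeps off whatever remains of it, which is what enforces MIN_POFF_MS across a fast off/on cycle. The same pattern in isolation (a sketch, not code from this driver):

	/* Sketch: enforce a minimum power-off time without a reset pin. */
	static ktime_t earliest_wake;

	static void panel_power_off(void)
	{
		/* ... disable the regulator ... */
		earliest_wake = ktime_add_ms(ktime_get_real(), MIN_POFF_MS);
	}

	static void panel_power_on(void)
	{
		s64 wait = ktime_ms_delta(earliest_wake, ktime_get_real());

		if (wait > MIN_POFF_MS)	/* clock moved backwards; clamp */
			wait = MIN_POFF_MS;
		if (wait > 0)
			msleep(wait);
		/* ... enable the regulator ... */
	}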
+static int wuxga_nt_panel_enable(struct drm_panel *panel)
+{
+	struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+
+	if (wuxga_nt->enabled)
+		return 0;
+
+	if (wuxga_nt->backlight) {
+		wuxga_nt->backlight->props.power = FB_BLANK_UNBLANK;
+		wuxga_nt->backlight->props.state &= ~BL_CORE_FBBLANK;
+		backlight_update_status(wuxga_nt->backlight);
+	}
+
+	wuxga_nt->enabled = true;
+
+	return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 164402,
+	.hdisplay = 1920,
+	.hsync_start = 1920 + 152,
+	.hsync_end = 1920 + 152 + 52,
+	.htotal = 1920 + 152 + 52 + 20,
+	.vdisplay = 1200,
+	.vsync_start = 1200 + 24,
+	.vsync_end = 1200 + 24 + 6,
+	.vtotal = 1200 + 24 + 6 + 48,
+	.vrefresh = 60,
+};
+
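The additive timing fields spell out the porches and sync widths. As a consistency check (a sketch; the DRM core's drm_mode_vrefresh() does essentially this), the totals reproduce the nominal refresh:

	/* htotal = 1920+152+52+20 = 2144, vtotal = 1200+24+6+48 = 1278;
	 * 164,402,000 Hz / (2144 * 1278) ~= 60 Hz, matching .vrefresh. */
	static int mode_refresh(const struct drm_display_mode *m)
	{
		return DIV_ROUND_CLOSEST(m->clock * 1000, m->htotal * m->vtotal);
	}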
+static int wuxga_nt_panel_get_modes(struct drm_panel *panel)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+				default_mode.hdisplay, default_mode.vdisplay,
+				default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(panel->connector, mode);
+
+	panel->connector->display_info.width_mm = 217;
+	panel->connector->display_info.height_mm = 136;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs wuxga_nt_panel_funcs = {
+	.disable = wuxga_nt_panel_disable,
+	.unprepare = wuxga_nt_panel_unprepare,
+	.prepare = wuxga_nt_panel_prepare,
+	.enable = wuxga_nt_panel_enable,
+	.get_modes = wuxga_nt_panel_get_modes,
+};
+
+static const struct of_device_id wuxga_nt_of_match[] = {
+	{ .compatible = "panasonic,vvx10f034n00", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wuxga_nt_of_match);
+
+static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
+{
+	struct device *dev = &wuxga_nt->dsi->dev;
+	struct device_node *np;
+	int ret;
+
+	wuxga_nt->mode = &default_mode;
+
+	wuxga_nt->supply = devm_regulator_get(dev, "power");
+	if (IS_ERR(wuxga_nt->supply))
+		return PTR_ERR(wuxga_nt->supply);
+
+	np = of_parse_phandle(dev->of_node, "backlight", 0);
+	if (np) {
+		wuxga_nt->backlight = of_find_backlight_by_node(np);
+		of_node_put(np);
+
+		if (!wuxga_nt->backlight)
+			return -EPROBE_DEFER;
+	}
+
+	drm_panel_init(&wuxga_nt->base);
+	wuxga_nt->base.funcs = &wuxga_nt_panel_funcs;
+	wuxga_nt->base.dev = &wuxga_nt->dsi->dev;
+
+	ret = drm_panel_add(&wuxga_nt->base);
+	if (ret < 0)
+		goto put_backlight;
+
+	return 0;
+
+put_backlight:
+	if (wuxga_nt->backlight)
+		put_device(&wuxga_nt->backlight->dev);
+
+	return ret;
+}
+
+static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt)
+{
+	if (wuxga_nt->base.dev)
+		drm_panel_remove(&wuxga_nt->base);
+
+	if (wuxga_nt->backlight)
+		put_device(&wuxga_nt->backlight->dev);
+}
+
+static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct wuxga_nt_panel *wuxga_nt;
+	int ret;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+			MIPI_DSI_MODE_VIDEO_HSE |
+			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+			MIPI_DSI_MODE_LPM;
+
+	wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL);
+	if (!wuxga_nt)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, wuxga_nt);
+
+	wuxga_nt->dsi = dsi;
+
+	ret = wuxga_nt_panel_add(wuxga_nt);
+	if (ret < 0)
+		return ret;
+
+	return mipi_dsi_attach(dsi);
+}
+
+static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = wuxga_nt_panel_disable(&wuxga_nt->base);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+	drm_panel_detach(&wuxga_nt->base);
+	wuxga_nt_panel_del(wuxga_nt);
+
+	return 0;
+}
+
+static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
+
+	wuxga_nt_panel_disable(&wuxga_nt->base);
+}
+
+static struct mipi_dsi_driver wuxga_nt_panel_driver = {
+	.driver = {
+		.name = "panel-panasonic-vvx10f034n00",
+		.of_match_table = wuxga_nt_of_match,
+	},
+	.probe = wuxga_nt_panel_probe,
+	.remove = wuxga_nt_panel_remove,
+	.shutdown = wuxga_nt_panel_shutdown,
+};
+module_mipi_dsi_driver(wuxga_nt_panel_driver);
+
+MODULE_AUTHOR("Werner Johansson <werner.johansson@sonymobile.com>");
+MODULE_DESCRIPTION("Panasonic VVX10F034N00 Novatek NT1397-based WUXGA (1920x1200) video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
new file mode 100644
index 0000000..3aeb0bd
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2015 Red Hat
+ * Copyright (C) 2015 Sony Mobile Communications Inc.
+ * Author: Werner Johansson <werner.johansson@sonymobile.com>
+ *
+ * Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct sharp_nt_panel {
+	struct drm_panel base;
+	struct mipi_dsi_device *dsi;
+
+	struct backlight_device *backlight;
+	struct regulator *supply;
+	struct gpio_desc *reset_gpio;
+
+	bool prepared;
+	bool enabled;
+
+	const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct sharp_nt_panel, base);
+}
+
+static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
+{
+	struct mipi_dsi_device *dsi = sharp_nt->dsi;
+	int ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	msleep(120);
+
+	/* Novatek two-lane operation */
+	ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
+	if (ret < 0)
+		return ret;
+
+	/* Set both MCU and RGB I/F to 24bpp */
+	ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
+					(MIPI_DCS_PIXEL_FMT_24BIT << 4));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
+{
+	struct mipi_dsi_device *dsi = sharp_nt->dsi;
+	int ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
+{
+	struct mipi_dsi_device *dsi = sharp_nt->dsi;
+	int ret;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+
+static int sharp_nt_panel_disable(struct drm_panel *panel)
+{
+	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+
+	if (!sharp_nt->enabled)
+		return 0;
+
+	if (sharp_nt->backlight) {
+		sharp_nt->backlight->props.power = FB_BLANK_POWERDOWN;
+		backlight_update_status(sharp_nt->backlight);
+	}
+
+	sharp_nt->enabled = false;
+
+	return 0;
+}
+
+static int sharp_nt_panel_unprepare(struct drm_panel *panel)
+{
+	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+	int ret;
+
+	if (!sharp_nt->prepared)
+		return 0;
+
+	ret = sharp_nt_panel_off(sharp_nt);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+		return ret;
+	}
+
+	regulator_disable(sharp_nt->supply);
+	if (sharp_nt->reset_gpio)
+		gpiod_set_value(sharp_nt->reset_gpio, 0);
+
+	sharp_nt->prepared = false;
+
+	return 0;
+}
+
+static int sharp_nt_panel_prepare(struct drm_panel *panel)
+{
+	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+	int ret;
+
+	if (sharp_nt->prepared)
+		return 0;
+
+	ret = regulator_enable(sharp_nt->supply);
+	if (ret < 0)
+		return ret;
+
+	msleep(20);
+
+	if (sharp_nt->reset_gpio) {
+		gpiod_set_value(sharp_nt->reset_gpio, 1);
+		msleep(1);
+		gpiod_set_value(sharp_nt->reset_gpio, 0);
+		msleep(1);
+		gpiod_set_value(sharp_nt->reset_gpio, 1);
+		msleep(10);
+	}
+
+	ret = sharp_nt_panel_init(sharp_nt);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to init panel: %d\n", ret);
+		goto poweroff;
+	}
+
+	ret = sharp_nt_panel_on(sharp_nt);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel on: %d\n", ret);
+		goto poweroff;
+	}
+
+	sharp_nt->prepared = true;
+
+	return 0;
+
+poweroff:
+	regulator_disable(sharp_nt->supply);
+	if (sharp_nt->reset_gpio)
+		gpiod_set_value(sharp_nt->reset_gpio, 0);
+	return ret;
+}
+
+static int sharp_nt_panel_enable(struct drm_panel *panel)
+{
+	struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+
+	if (sharp_nt->enabled)
+		return 0;
+
+	if (sharp_nt->backlight) {
+		sharp_nt->backlight->props.power = FB_BLANK_UNBLANK;
+		backlight_update_status(sharp_nt->backlight);
+	}
+
+	sharp_nt->enabled = true;
+
+	return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 41118,
+	.hdisplay = 540,
+	.hsync_start = 540 + 48,
+	.hsync_end = 540 + 48 + 80,
+	.htotal = 540 + 48 + 80 + 32,
+	.vdisplay = 960,
+	.vsync_start = 960 + 3,
+	.vsync_end = 960 + 3 + 15,
+	.vtotal = 960 + 3 + 15 + 1,
+	.vrefresh = 60,
+};
+
+static int sharp_nt_panel_get_modes(struct drm_panel *panel)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+				default_mode.hdisplay, default_mode.vdisplay,
+				default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(panel->connector, mode);
+
+	panel->connector->display_info.width_mm = 54;
+	panel->connector->display_info.height_mm = 95;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs sharp_nt_panel_funcs = {
+	.disable = sharp_nt_panel_disable,
+	.unprepare = sharp_nt_panel_unprepare,
+	.prepare = sharp_nt_panel_prepare,
+	.enable = sharp_nt_panel_enable,
+	.get_modes = sharp_nt_panel_get_modes,
+};
+
+static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
+{
+	struct device *dev = &sharp_nt->dsi->dev;
+	struct device_node *np;
+	int ret;
+
+	sharp_nt->mode = &default_mode;
+
+	sharp_nt->supply = devm_regulator_get(dev, "avdd");
+	if (IS_ERR(sharp_nt->supply))
+		return PTR_ERR(sharp_nt->supply);
+
+	sharp_nt->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(sharp_nt->reset_gpio)) {
+		dev_err(dev, "cannot get reset-gpios %ld\n",
+			PTR_ERR(sharp_nt->reset_gpio));
+		sharp_nt->reset_gpio = NULL;
+	} else {
+		gpiod_set_value(sharp_nt->reset_gpio, 0);
+	}
+
+	np = of_parse_phandle(dev->of_node, "backlight", 0);
+	if (np) {
+		sharp_nt->backlight = of_find_backlight_by_node(np);
+		of_node_put(np);
+
+		if (!sharp_nt->backlight)
+			return -EPROBE_DEFER;
+	}
+
+	drm_panel_init(&sharp_nt->base);
+	sharp_nt->base.funcs = &sharp_nt_panel_funcs;
+	sharp_nt->base.dev = &sharp_nt->dsi->dev;
+
+	ret = drm_panel_add(&sharp_nt->base);
+	if (ret < 0)
+		goto put_backlight;
+
+	return 0;
+
+put_backlight:
+	if (sharp_nt->backlight)
+		put_device(&sharp_nt->backlight->dev);
+
+	return ret;
+}
+
+static void sharp_nt_panel_del(struct sharp_nt_panel *sharp_nt)
+{
+	if (sharp_nt->base.dev)
+		drm_panel_remove(&sharp_nt->base);
+
+	if (sharp_nt->backlight)
+		put_device(&sharp_nt->backlight->dev);
+}
+
+static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct sharp_nt_panel *sharp_nt;
+	int ret;
+
+	dsi->lanes = 2;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+			MIPI_DSI_MODE_VIDEO_HSE |
+			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+			MIPI_DSI_MODE_EOT_PACKET;
+
+	sharp_nt = devm_kzalloc(&dsi->dev, sizeof(*sharp_nt), GFP_KERNEL);
+	if (!sharp_nt)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, sharp_nt);
+
+	sharp_nt->dsi = dsi;
+
+	ret = sharp_nt_panel_add(sharp_nt);
+	if (ret < 0)
+		return ret;
+
+	return mipi_dsi_attach(dsi);
+}
+
+static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = sharp_nt_panel_disable(&sharp_nt->base);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+	drm_panel_detach(&sharp_nt->base);
+	sharp_nt_panel_del(sharp_nt);
+
+	return 0;
+}
+
+static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
+
+	sharp_nt_panel_disable(&sharp_nt->base);
+}
+
+static const struct of_device_id sharp_nt_of_match[] = {
+	{ .compatible = "sharp,ls043t1le01-qhd", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sharp_nt_of_match);
+
+static struct mipi_dsi_driver sharp_nt_panel_driver = {
+	.driver = {
+		.name = "panel-sharp-ls043t1le01-qhd",
+		.of_match_table = sharp_nt_of_match,
+	},
+	.probe = sharp_nt_panel_probe,
+	.remove = sharp_nt_panel_remove,
+	.shutdown = sharp_nt_panel_shutdown,
+};
+module_mipi_dsi_driver(sharp_nt_panel_driver);
+
+MODULE_AUTHOR("Werner Johansson <werner.johansson@sonymobile.com>");
+MODULE_DESCRIPTION("Sharp LS043T1LE01 NT35565-based qHD (540x960) video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f97b73e..f88a631 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -44,6 +44,10 @@
 
 	unsigned int bpc;
 
+	/**
+	 * @width: width (in millimeters) of the panel's active display area
+	 * @height: height (in millimeters) of the panel's active display area
+	 */
 	struct {
 		unsigned int width;
 		unsigned int height;
@@ -832,6 +836,34 @@
 	},
 };
 
+static const struct drm_display_mode innolux_g121x1_l03_mode = {
+	.clock = 65000,
+	.hdisplay = 1024,
+	.hsync_start = 1024 + 0,
+	.hsync_end = 1024 + 1,
+	.htotal = 1024 + 0 + 1 + 320,
+	.vdisplay = 768,
+	.vsync_start = 768 + 38,
+	.vsync_end = 768 + 38 + 1,
+	.vtotal = 768 + 38 + 1 + 0,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc innolux_g121x1_l03 = {
+	.modes = &innolux_g121x1_l03_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 246,
+		.height = 185,
+	},
+	.delay = {
+		.enable = 200,
+		.unprepare = 200,
+		.disable = 400,
+	},
+};
+
 static const struct drm_display_mode innolux_n116bge_mode = {
 	.clock = 76420,
 	.hdisplay = 1366,
@@ -902,6 +934,30 @@
 	},
 };
 
+static const struct display_timing kyo_tcg121xglp_timing = {
+	.pixelclock = { 52000000, 65000000, 71000000 },
+	.hactive = { 1024, 1024, 1024 },
+	.hfront_porch = { 2, 2, 2 },
+	.hback_porch = { 2, 2, 2 },
+	.hsync_len = { 86, 124, 244 },
+	.vactive = { 768, 768, 768 },
+	.vfront_porch = { 2, 2, 2 },
+	.vback_porch = { 2, 2, 2 },
+	.vsync_len = { 6, 34, 73 },
+	.flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc kyo_tcg121xglp = {
+	.timings = &kyo_tcg121xglp_timing,
+	.num_timings = 1,
+	.bpc = 8,
+	.size = {
+		.width = 246,
+		.height = 184,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
 static const struct drm_display_mode lg_lb070wv8_mode = {
 	.clock = 33246,
 	.hdisplay = 800,
@@ -1027,6 +1083,30 @@
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct drm_display_mode qd43003c0_40_mode = {
+	.clock = 9000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 8,
+	.hsync_end = 480 + 8 + 4,
+	.htotal = 480 + 8 + 4 + 39,
+	.vdisplay = 272,
+	.vsync_start = 272 + 4,
+	.vsync_end = 272 + 4 + 10,
+	.vtotal = 272 + 4 + 10 + 2,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc qd43003c0_40 = {
+	.modes = &qd43003c0_40_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 95,
+		.height = 53,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
 static const struct drm_display_mode samsung_ltn101nt05_mode = {
 	.clock = 54030,
 	.hdisplay = 1024,
@@ -1158,6 +1238,9 @@
 		.compatible ="innolux,g121i1-l01",
 		.data = &innolux_g121i1_l01
 	}, {
+		.compatible = "innolux,g121x1-l03",
+		.data = &innolux_g121x1_l03,
+	}, {
 		.compatible = "innolux,n116bge",
 		.data = &innolux_n116bge,
 	}, {
@@ -1167,6 +1250,9 @@
 		.compatible = "innolux,zj070na-01p",
 		.data = &innolux_zj070na_01p,
 	}, {
+		.compatible = "kyo,tcg121xglp",
+		.data = &kyo_tcg121xglp,
+	}, {
 		.compatible = "lg,lb070wv8",
 		.data = &lg_lb070wv8,
 	}, {
@@ -1182,6 +1268,9 @@
 		.compatible = "ortustech,com43h4m85ulc",
 		.data = &ortustech_com43h4m85ulc,
 	}, {
+		.compatible = "qiaodian,qd43003c0-40",
+		.data = &qd43003c0_40,
+	}, {
 		.compatible = "samsung,ltn101nt05",
 		.data = &samsung_ltn101nt05,
 	}, {
@@ -1263,6 +1352,36 @@
 	.lanes = 4,
 };
 
+static const struct drm_display_mode boe_tv080wum_nl0_mode = {
+	.clock = 160000,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 120,
+	.hsync_end = 1200 + 120 + 20,
+	.htotal = 1200 + 120 + 20 + 21,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 21,
+	.vsync_end = 1920 + 21 + 3,
+	.vtotal = 1920 + 21 + 3 + 18,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc_dsi boe_tv080wum_nl0 = {
+	.desc = {
+		.modes = &boe_tv080wum_nl0_mode,
+		.num_modes = 1,
+		.size = {
+			.width = 107,
+			.height = 172,
+		},
+	},
+	.flags = MIPI_DSI_MODE_VIDEO |
+		 MIPI_DSI_MODE_VIDEO_BURST |
+		 MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+	.format = MIPI_DSI_FMT_RGB888,
+	.lanes = 4,
+};
+
 static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
 	.clock = 71000,
 	.hdisplay = 800,
@@ -1348,11 +1467,15 @@
 	.lanes = 4,
 };
 
+
 static const struct of_device_id dsi_of_match[] = {
 	{
 		.compatible = "auo,b080uan01",
 		.data = &auo_b080uan01
 	}, {
+		.compatible = "boe,tv080wum-nl0",
+		.data = &boe_tv080wum_nl0
+	}, {
 		.compatible = "lg,ld070wx3-sl01",
 		.data = &lg_ld070wx3_sl01
 	}, {
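
The three-element initializers in kyo_tcg121xglp_timing above are
{ min, typical, max } ranges: struct display_timing is built from
struct timing_entry values (see include/video/display_timing.h), roughly:

	struct timing_entry {
		u32 min;
		u32 typ;
		u32 max;
	};

so ".hsync_len = { 86, 124, 244 }" advertises a supported range rather than
a single fixed value, and the display driver may pick any value inside it.
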
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index b28370e..5e1d789 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -32,7 +32,7 @@
 	struct qxl_bo *bo;
 	struct qxl_device *qdev;
 
-	bo = container_of(tbo, struct qxl_bo, tbo);
+	bo = to_qxl_bo(tbo);
 	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
 
 	qxl_surface_evict(qdev, bo, false);
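
to_qxl_bo() replaces the open-coded container_of() downcasts here and in
qxl_ttm.c below. The helper is presumably the usual embedded-struct wrapper
(its definition lives in qxl_object.h and is not part of this diff), along
the lines of:

	static inline struct qxl_bo *to_qxl_bo(struct ttm_buffer_object *tbo)
	{
		return container_of(tbo, struct qxl_bo, tbo);
	}

The tegra dpaux conversion later in this series leans on the same pattern,
with to_dpaux() recovering a struct tegra_dpaux from its embedded
struct drm_dp_aux.
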
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0cbc4c9..9534127 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -201,7 +201,7 @@
 		placement->num_busy_placement = 1;
 		return;
 	}
-	qbo = container_of(bo, struct qxl_bo, tbo);
+	qbo = to_qxl_bo(bo);
 	qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
 	*placement = qbo->placement;
 }
@@ -365,7 +365,7 @@
 
 	if (!qxl_ttm_bo_is_qxl_bo(bo))
 		return;
-	qbo = container_of(bo, struct qxl_bo, tbo);
+	qbo = to_qxl_bo(bo);
 	qdev = qbo->gem_base.dev->dev_private;
 
 	if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 96f2eb4..a37b6e2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -28,7 +28,7 @@
 {
 	struct rcar_du_connector *con = to_rcar_connector(connector);
 	struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
-	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+	const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 
 	if (sfuncs->get_modes == NULL)
 		return 0;
@@ -41,7 +41,7 @@
 {
 	struct rcar_du_connector *con = to_rcar_connector(connector);
 	struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
-	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+	const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 
 	if (sfuncs->mode_valid == NULL)
 		return MODE_OK;
@@ -66,7 +66,7 @@
 {
 	struct rcar_du_connector *con = to_rcar_connector(connector);
 	struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
-	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+	const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 
 	if (sfuncs->detect == NULL)
 		return connector_status_unknown;
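
The constification in this and the following patches is purely mechanical:
these ops tables are only ever read, so once the DRM core accepts
const-qualified pointers for them, marking the tables const lets the
compiler place them in read-only data and catches accidental writes at
build time.
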
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 11267de..2567efc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -35,7 +35,7 @@
 static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
 {
 	struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+	const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 
 	if (sfuncs->dpms)
 		sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -50,7 +50,7 @@
 static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
 {
 	struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+	const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 
 	if (hdmienc->renc->lvds)
 		rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
@@ -67,7 +67,7 @@
 					struct drm_connector_state *conn_state)
 {
 	struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+	const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
 	const struct drm_display_mode *mode = &crtc_state->mode;
 
@@ -89,7 +89,7 @@
 				     struct drm_display_mode *adjusted_mode)
 {
 	struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-	struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+	const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 
 	if (sfuncs->mode_set)
 		sfuncs->mode_set(encoder, mode, adjusted_mode);
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 525b5a8..bddcabd 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -173,7 +173,7 @@
 	return (valid) ? MODE_OK : MODE_BAD;
 }
 
-static struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
+static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
@@ -218,7 +218,7 @@
 				      ROCKCHIP_OUT_MODE_AAAA);
 }
 
-static struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
 	.mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
 	.mode_set   = dw_hdmi_rockchip_encoder_mode_set,
 	.prepare    = dw_hdmi_rockchip_encoder_prepare,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index f22e1e1..afbb740 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -450,10 +450,6 @@
 	if (!drm)
 		return -ENOMEM;
 
-	ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
-	if (ret)
-		goto err_free;
-
 	ret = drm_dev_register(drm, 0);
 	if (ret)
 		goto err_free;
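
Dropping drm_dev_set_unique() here (and in the tegra and vc4 patches below)
is safe because the DRM core now falls back to dev_name() of the parent
device when no unique name has been set, so calling it with the device name
was redundant.
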
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index b8ac5911..621f25c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -66,7 +66,7 @@
 				     rockchip_fb->obj[0], handle);
 }
 
-static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
+static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
 	.destroy	= rockchip_drm_fb_destroy,
 	.create_handle	= rockchip_drm_fb_create_handle,
 };
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 1f5cb68..dde6f20 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1955,8 +1955,10 @@
 		 * cases where only a single display controller is used.
 		 */
 		for_each_matching_node(np, tegra_dc_of_match) {
-			if (np == dc->dev->of_node)
+			if (np == dc->dev->of_node) {
+				of_node_put(np);
 				break;
+			}
 
 			value++;
 		}
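
The of_node_put() added above follows the reference-counting contract of the
OF iterators: for_each_matching_node() returns each node with a reference
held and only drops it when advancing to the next node, so breaking out of
the loop early leaks a reference unless the loop body drops it itself.
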
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 6aecb66..b24a0f1 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -436,7 +436,7 @@
 	.remove = tegra_dpaux_remove,
 };
 
-struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
+struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np)
 {
 	struct tegra_dpaux *dpaux;
 
@@ -445,7 +445,7 @@
 	list_for_each_entry(dpaux, &dpaux_list, list)
 		if (np == dpaux->dev->of_node) {
 			mutex_unlock(&dpaux_lock);
-			return dpaux;
+			return &dpaux->aux;
 		}
 
 	mutex_unlock(&dpaux_lock);
@@ -453,8 +453,9 @@
 	return NULL;
 }
 
-int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
+int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
 {
+	struct tegra_dpaux *dpaux = to_dpaux(aux);
 	unsigned long timeout;
 	int err;
 
@@ -470,7 +471,7 @@
 	while (time_before(jiffies, timeout)) {
 		enum drm_connector_status status;
 
-		status = tegra_dpaux_detect(dpaux);
+		status = drm_dp_aux_detect(aux);
 		if (status == connector_status_connected) {
 			enable_irq(dpaux->irq);
 			return 0;
@@ -482,8 +483,9 @@
 	return -ETIMEDOUT;
 }
 
-int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
+int drm_dp_aux_detach(struct drm_dp_aux *aux)
 {
+	struct tegra_dpaux *dpaux = to_dpaux(aux);
 	unsigned long timeout;
 	int err;
 
@@ -498,7 +500,7 @@
 	while (time_before(jiffies, timeout)) {
 		enum drm_connector_status status;
 
-		status = tegra_dpaux_detect(dpaux);
+		status = drm_dp_aux_detect(aux);
 		if (status == connector_status_disconnected) {
 			dpaux->output = NULL;
 			return 0;
@@ -510,8 +512,9 @@
 	return -ETIMEDOUT;
 }
 
-enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
+enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
 {
+	struct tegra_dpaux *dpaux = to_dpaux(aux);
 	u32 value;
 
 	value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
@@ -522,8 +525,9 @@
 	return connector_status_disconnected;
 }
 
-int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
+int drm_dp_aux_enable(struct drm_dp_aux *aux)
 {
+	struct tegra_dpaux *dpaux = to_dpaux(aux);
 	u32 value;
 
 	value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
@@ -540,8 +544,9 @@
 	return 0;
 }
 
-int tegra_dpaux_disable(struct tegra_dpaux *dpaux)
+int drm_dp_aux_disable(struct drm_dp_aux *aux)
 {
+	struct tegra_dpaux *dpaux = to_dpaux(aux);
 	u32 value;
 
 	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
@@ -551,11 +556,11 @@
 	return 0;
 }
 
-int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding)
+int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
 {
 	int err;
 
-	err = drm_dp_dpcd_writeb(&dpaux->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+	err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
 				 encoding);
 	if (err < 0)
 		return err;
@@ -563,15 +568,15 @@
 	return 0;
 }
 
-int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
-		      u8 pattern)
+int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
+		     u8 pattern)
 {
 	u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
 	u8 status[DP_LINK_STATUS_SIZE], values[4];
 	unsigned int i;
 	int err;
 
-	err = drm_dp_dpcd_writeb(&dpaux->aux, DP_TRAINING_PATTERN_SET, pattern);
+	err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
 	if (err < 0)
 		return err;
 
@@ -584,14 +589,14 @@
 			    DP_TRAIN_MAX_SWING_REACHED |
 			    DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
 
-	err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values,
+	err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
 				link->num_lanes);
 	if (err < 0)
 		return err;
 
 	usleep_range(500, 1000);
 
-	err = drm_dp_dpcd_read_link_status(&dpaux->aux, status);
+	err = drm_dp_dpcd_read_link_status(aux, status);
 	if (err < 0)
 		return err;
 
@@ -609,11 +614,11 @@
 		break;
 
 	default:
-		dev_err(dpaux->dev, "unsupported training pattern %u\n", tp);
+		dev_err(aux->dev, "unsupported training pattern %u\n", tp);
 		return -EINVAL;
 	}
 
-	err = drm_dp_dpcd_writeb(&dpaux->aux, DP_EDP_CONFIGURATION_SET, 0);
+	err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e0f8277..c5c856a 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -137,8 +137,8 @@
 		start = geometry->aperture_start;
 		end = geometry->aperture_end;
 
-		DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
-			  start, end);
+		DRM_DEBUG_DRIVER("IOMMU aperture initialized (%#llx-%#llx)\n",
+				 start, end);
 		drm_mm_init(&tegra->mm, start, end - start + 1);
 	}
 
@@ -277,9 +277,7 @@
 	if (!gem)
 		return NULL;
 
-	mutex_lock(&drm->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&drm->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 
 	bo = to_tegra_bo(gem);
 	return &bo->base;
@@ -473,7 +471,7 @@
 
 	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
 
-	drm_gem_object_unreference(gem);
+	drm_gem_object_unreference_unlocked(gem);
 
 	return 0;
 }
@@ -683,7 +681,7 @@
 	bo->tiling.mode = mode;
 	bo->tiling.value = value;
 
-	drm_gem_object_unreference(gem);
+	drm_gem_object_unreference_unlocked(gem);
 
 	return 0;
 }
@@ -723,7 +721,7 @@
 		break;
 	}
 
-	drm_gem_object_unreference(gem);
+	drm_gem_object_unreference_unlocked(gem);
 
 	return err;
 }
@@ -748,7 +746,7 @@
 	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
 		bo->flags |= TEGRA_BO_BOTTOM_UP;
 
-	drm_gem_object_unreference(gem);
+	drm_gem_object_unreference_unlocked(gem);
 
 	return 0;
 }
@@ -770,7 +768,7 @@
 	if (bo->flags & TEGRA_BO_BOTTOM_UP)
 		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
 
-	drm_gem_object_unreference(gem);
+	drm_gem_object_unreference_unlocked(gem);
 
 	return 0;
 }
@@ -921,7 +919,8 @@
 #endif
 
 static struct drm_driver tegra_drm_driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+			   DRIVER_ATOMIC,
 	.load = tegra_drm_load,
 	.unload = tegra_drm_unload,
 	.open = tegra_drm_open,
@@ -991,7 +990,6 @@
 	if (!drm)
 		return -ENOMEM;
 
-	drm_dev_set_unique(drm, dev_name(&dev->dev));
 	dev_set_drvdata(&dev->dev, drm);
 
 	err = drm_dev_register(drm, 0);
@@ -1023,8 +1021,17 @@
 static int host1x_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
+	struct tegra_drm *tegra = drm->dev_private;
 
 	drm_kms_helper_poll_disable(drm);
+	tegra_drm_fb_suspend(drm);
+
+	tegra->state = drm_atomic_helper_suspend(drm);
+	if (IS_ERR(tegra->state)) {
+		tegra_drm_fb_resume(drm);
+		drm_kms_helper_poll_enable(drm);
+		return PTR_ERR(tegra->state);
+	}
 
 	return 0;
 }
@@ -1032,7 +1039,10 @@
 static int host1x_drm_resume(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
+	struct tegra_drm *tegra = drm->dev_private;
 
+	drm_atomic_helper_resume(drm, tegra->state);
+	tegra_drm_fb_resume(drm);
 	drm_kms_helper_poll_enable(drm);
 
 	return 0;
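
drm_atomic_helper_suspend() used above captures the current atomic state of
all CRTCs, planes and connectors and then disables them, handing back the
saved state (or an ERR_PTR on failure) for drm_atomic_helper_resume() to
replay on the way back up; that is why the suspend error path restores the
fbdev and poll helper before bailing out.
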
@@ -1076,6 +1086,16 @@
 	.subdevs = host1x_drm_subdevs,
 };
 
+static struct platform_driver * const drivers[] = {
+	&tegra_dc_driver,
+	&tegra_hdmi_driver,
+	&tegra_dsi_driver,
+	&tegra_dpaux_driver,
+	&tegra_sor_driver,
+	&tegra_gr2d_driver,
+	&tegra_gr3d_driver,
+};
+
 static int __init host1x_drm_init(void)
 {
 	int err;
@@ -1084,48 +1104,12 @@
 	if (err < 0)
 		return err;
 
-	err = platform_driver_register(&tegra_dc_driver);
+	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
 	if (err < 0)
 		goto unregister_host1x;
 
-	err = platform_driver_register(&tegra_dsi_driver);
-	if (err < 0)
-		goto unregister_dc;
-
-	err = platform_driver_register(&tegra_sor_driver);
-	if (err < 0)
-		goto unregister_dsi;
-
-	err = platform_driver_register(&tegra_hdmi_driver);
-	if (err < 0)
-		goto unregister_sor;
-
-	err = platform_driver_register(&tegra_dpaux_driver);
-	if (err < 0)
-		goto unregister_hdmi;
-
-	err = platform_driver_register(&tegra_gr2d_driver);
-	if (err < 0)
-		goto unregister_dpaux;
-
-	err = platform_driver_register(&tegra_gr3d_driver);
-	if (err < 0)
-		goto unregister_gr2d;
-
 	return 0;
 
-unregister_gr2d:
-	platform_driver_unregister(&tegra_gr2d_driver);
-unregister_dpaux:
-	platform_driver_unregister(&tegra_dpaux_driver);
-unregister_hdmi:
-	platform_driver_unregister(&tegra_hdmi_driver);
-unregister_sor:
-	platform_driver_unregister(&tegra_sor_driver);
-unregister_dsi:
-	platform_driver_unregister(&tegra_dsi_driver);
-unregister_dc:
-	platform_driver_unregister(&tegra_dc_driver);
 unregister_host1x:
 	host1x_driver_unregister(&host1x_drm_driver);
 	return err;
@@ -1134,13 +1118,7 @@
 
 static void __exit host1x_drm_exit(void)
 {
-	platform_driver_unregister(&tegra_gr3d_driver);
-	platform_driver_unregister(&tegra_gr2d_driver);
-	platform_driver_unregister(&tegra_dpaux_driver);
-	platform_driver_unregister(&tegra_hdmi_driver);
-	platform_driver_unregister(&tegra_sor_driver);
-	platform_driver_unregister(&tegra_dsi_driver);
-	platform_driver_unregister(&tegra_dc_driver);
+	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
 	host1x_driver_unregister(&host1x_drm_driver);
 }
 module_exit(host1x_drm_exit);
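
platform_register_drivers() collapses the register/unwind ladder above (the
host1x core below gets the same treatment). A rough sketch of its semantics,
for reference -- the real implementation lives in drivers/base/platform.c:

	static int register_drivers(struct platform_driver * const *drivers,
				    unsigned int count)
	{
		unsigned int i;
		int err;

		for (i = 0; i < count; i++) {
			err = platform_driver_register(drivers[i]);
			if (err < 0)
				goto unwind;
		}

		return 0;

	unwind:
		/* unregister everything that made it in before the failure */
		while (i--)
			platform_driver_unregister(drivers[i]);

		return err;
	}
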
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index d88a2d1..c088f2f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -57,6 +57,8 @@
 		struct work_struct work;
 		struct mutex lock;
 	} commit;
+
+	struct drm_atomic_state *state;
 };
 
 struct tegra_drm_client;
@@ -247,18 +249,17 @@
 void tegra_output_encoder_destroy(struct drm_encoder *encoder);
 
 /* from dpaux.c */
-struct tegra_dpaux;
 struct drm_dp_link;
 
-struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np);
-enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux);
-int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output);
-int tegra_dpaux_detach(struct tegra_dpaux *dpaux);
-int tegra_dpaux_enable(struct tegra_dpaux *dpaux);
-int tegra_dpaux_disable(struct tegra_dpaux *dpaux);
-int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding);
-int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
-		      u8 pattern);
+struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
+enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
+int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
+int drm_dp_aux_detach(struct drm_dp_aux *aux);
+int drm_dp_aux_enable(struct drm_dp_aux *aux);
+int drm_dp_aux_disable(struct drm_dp_aux *aux);
+int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
+int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
+		     u8 pattern);
 
 /* from fb.c */
 struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
@@ -273,16 +274,18 @@
 void tegra_drm_fb_free(struct drm_device *drm);
 int tegra_drm_fb_init(struct drm_device *drm);
 void tegra_drm_fb_exit(struct drm_device *drm);
+void tegra_drm_fb_suspend(struct drm_device *drm);
+void tegra_drm_fb_resume(struct drm_device *drm);
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 void tegra_fb_output_poll_changed(struct drm_device *drm);
 #endif
 
 extern struct platform_driver tegra_dc_driver;
-extern struct platform_driver tegra_dsi_driver;
-extern struct platform_driver tegra_sor_driver;
 extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dsi_driver;
 extern struct platform_driver tegra_dpaux_driver;
+extern struct platform_driver tegra_sor_driver;
 extern struct platform_driver tegra_gr2d_driver;
 extern struct platform_driver tegra_gr3d_driver;
 
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index ede9e94..ca84de9 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/console.h>
+
 #include "drm.h"
 #include "gem.h"
 
@@ -86,7 +88,7 @@
 	return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
 }
 
-static struct drm_framebuffer_funcs tegra_fb_funcs = {
+static const struct drm_framebuffer_funcs tegra_fb_funcs = {
 	.destroy = tegra_fb_destroy,
 	.create_handle = tegra_fb_create_handle,
 };
@@ -413,3 +415,25 @@
 	tegra_fbdev_exit(tegra->fbdev);
 #endif
 }
+
+void tegra_drm_fb_suspend(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	struct tegra_drm *tegra = drm->dev_private;
+
+	console_lock();
+	drm_fb_helper_set_suspend(&tegra->fbdev->base, 1);
+	console_unlock();
+#endif
+}
+
+void tegra_drm_fb_resume(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	struct tegra_drm *tegra = drm->dev_private;
+
+	console_lock();
+	drm_fb_helper_set_suspend(&tegra->fbdev->base, 0);
+	console_unlock();
+#endif
+}
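
The console_lock()/console_unlock() bracket around
drm_fb_helper_set_suspend() is required rather than defensive: the helper
ends up in fb_set_suspend(), which must be called with the console lock held
because fbcon may be drawing to the framebuffer concurrently.
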
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 01e16e1..33add93 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -28,11 +28,8 @@
 static void tegra_bo_put(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-	struct drm_device *drm = obj->gem.dev;
 
-	mutex_lock(&drm->struct_mutex);
-	drm_gem_object_unreference(&obj->gem);
-	mutex_unlock(&drm->struct_mutex);
+	drm_gem_object_unreference_unlocked(&obj->gem);
 }
 
 static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
@@ -72,11 +69,8 @@
 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-	struct drm_device *drm = obj->gem.dev;
 
-	mutex_lock(&drm->struct_mutex);
 	drm_gem_object_reference(&obj->gem);
-	mutex_unlock(&drm->struct_mutex);
 
 	return bo;
 }
@@ -408,12 +402,9 @@
 	struct drm_gem_object *gem;
 	struct tegra_bo *bo;
 
-	mutex_lock(&drm->struct_mutex);
-
 	gem = drm_gem_object_lookup(drm, file, handle);
 	if (!gem) {
 		dev_err(drm->dev, "failed to lookup GEM object\n");
-		mutex_unlock(&drm->struct_mutex);
 		return -EINVAL;
 	}
 
@@ -421,9 +412,7 @@
 
 	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
 
-	drm_gem_object_unreference(gem);
-
-	mutex_unlock(&drm->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 
 	return 0;
 }
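
The conversions in this patch rely on the locking rules of the two GEM
unreference flavours: drm_gem_object_unreference() requires the caller to
hold dev->struct_mutex (the final-free path may need it), while the
_unlocked() variant takes care of the locking itself. Taking a reference,
by contrast, is just a kref_get() and never needed the mutex, which is why
tegra_bo_get() simply loses the lock/unlock pair with no substitute.
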
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 3e012ee..757c6e8 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -173,7 +173,7 @@
 	struct clk *clk_dp;
 	struct clk *clk;
 
-	struct tegra_dpaux *dpaux;
+	struct drm_dp_aux *aux;
 
 	struct drm_info_list *debugfs_files;
 	struct drm_minor *minor;
@@ -273,7 +273,7 @@
 		   SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
 	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
+	err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
 	if (err < 0)
 		return err;
 
@@ -288,7 +288,7 @@
 
 	pattern = DP_TRAINING_PATTERN_1;
 
-	err = tegra_dpaux_train(sor->dpaux, link, pattern);
+	err = drm_dp_aux_train(sor->aux, link, pattern);
 	if (err < 0)
 		return err;
 
@@ -309,7 +309,7 @@
 
 	pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
 
-	err = tegra_dpaux_train(sor->dpaux, link, pattern);
+	err = drm_dp_aux_train(sor->aux, link, pattern);
 	if (err < 0)
 		return err;
 
@@ -324,7 +324,7 @@
 
 	pattern = DP_TRAINING_PATTERN_DISABLE;
 
-	err = tegra_dpaux_train(sor->dpaux, link, pattern);
+	err = drm_dp_aux_train(sor->aux, link, pattern);
 	if (err < 0)
 		return err;
 
@@ -1044,8 +1044,8 @@
 	struct tegra_output *output = connector_to_output(connector);
 	struct tegra_sor *sor = to_sor(output);
 
-	if (sor->dpaux)
-		return tegra_dpaux_detect(sor->dpaux);
+	if (sor->aux)
+		return drm_dp_aux_detect(sor->aux);
 
 	return tegra_output_connector_detect(connector, force);
 }
@@ -1066,13 +1066,13 @@
 	struct tegra_sor *sor = to_sor(output);
 	int err;
 
-	if (sor->dpaux)
-		tegra_dpaux_enable(sor->dpaux);
+	if (sor->aux)
+		drm_dp_aux_enable(sor->aux);
 
 	err = tegra_output_connector_get_modes(connector);
 
-	if (sor->dpaux)
-		tegra_dpaux_disable(sor->dpaux);
+	if (sor->aux)
+		drm_dp_aux_disable(sor->aux);
 
 	return err;
 }
@@ -1128,8 +1128,8 @@
 	if (err < 0)
 		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
 
-	if (sor->dpaux) {
-		err = tegra_dpaux_disable(sor->dpaux);
+	if (sor->aux) {
+		err = drm_dp_aux_disable(sor->aux);
 		if (err < 0)
 			dev_err(sor->dev, "failed to disable DP: %d\n", err);
 	}
@@ -1196,7 +1196,7 @@
 	struct tegra_sor *sor = to_sor(output);
 	struct tegra_sor_config config;
 	struct drm_dp_link link;
-	struct drm_dp_aux *aux;
+	u8 rate, lanes;
 	int err = 0;
 	u32 value;
 
@@ -1209,20 +1209,14 @@
 	if (output->panel)
 		drm_panel_prepare(output->panel);
 
-	/* FIXME: properly convert to struct drm_dp_aux */
-	aux = (struct drm_dp_aux *)sor->dpaux;
+	err = drm_dp_aux_enable(sor->aux);
+	if (err < 0)
+		dev_err(sor->dev, "failed to enable DP: %d\n", err);
 
-	if (sor->dpaux) {
-		err = tegra_dpaux_enable(sor->dpaux);
-		if (err < 0)
-			dev_err(sor->dev, "failed to enable DP: %d\n", err);
-
-		err = drm_dp_link_probe(aux, &link);
-		if (err < 0) {
-			dev_err(sor->dev, "failed to probe eDP link: %d\n",
-				err);
-			return;
-		}
+	err = drm_dp_link_probe(sor->aux, &link);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
+		return;
 	}
 
 	err = clk_set_parent(sor->clk, sor->clk_safe);
@@ -1434,61 +1428,52 @@
 	value |= SOR_DP_PADCTL_PAD_CAL_PD;
 	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	if (sor->dpaux) {
-		u8 rate, lanes;
+	err = drm_dp_link_probe(sor->aux, &link);
+	if (err < 0)
+		dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
 
-		err = drm_dp_link_probe(aux, &link);
-		if (err < 0)
-			dev_err(sor->dev, "failed to probe eDP link: %d\n",
-				err);
+	err = drm_dp_link_power_up(sor->aux, &link);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
 
-		err = drm_dp_link_power_up(aux, &link);
-		if (err < 0)
-			dev_err(sor->dev, "failed to power up eDP link: %d\n",
-				err);
+	err = drm_dp_link_configure(sor->aux, &link);
+	if (err < 0)
+		dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
 
-		err = drm_dp_link_configure(aux, &link);
-		if (err < 0)
-			dev_err(sor->dev, "failed to configure eDP link: %d\n",
-				err);
+	rate = drm_dp_link_rate_to_bw_code(link.rate);
+	lanes = link.num_lanes;
 
-		rate = drm_dp_link_rate_to_bw_code(link.rate);
-		lanes = link.num_lanes;
+	value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+	value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+	value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-		value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
-		value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
-		value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
-		tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+	value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+	value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+	value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
 
-		value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
-		value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
-		value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+	if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+		value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
 
-		if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
-			value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+	tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
-		tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+	/* disable training pattern generator */
 
-		/* disable training pattern generator */
-
-		for (i = 0; i < link.num_lanes; i++) {
-			unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
-					     SOR_DP_TPG_SCRAMBLER_GALIOS |
-					     SOR_DP_TPG_PATTERN_NONE;
-			value = (value << 8) | lane;
-		}
-
-		tegra_sor_writel(sor, value, SOR_DP_TPG);
-
-		err = tegra_sor_dp_train_fast(sor, &link);
-		if (err < 0) {
-			dev_err(sor->dev, "DP fast link training failed: %d\n",
-				err);
-		}
-
-		dev_dbg(sor->dev, "fast link training succeeded\n");
+	for (i = 0; i < link.num_lanes; i++) {
+		unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
+				     SOR_DP_TPG_SCRAMBLER_GALIOS |
+				     SOR_DP_TPG_PATTERN_NONE;
+		value = (value << 8) | lane;
 	}
 
+	tegra_sor_writel(sor, value, SOR_DP_TPG);
+
+	err = tegra_sor_dp_train_fast(sor, &link);
+	if (err < 0)
+		dev_err(sor->dev, "DP fast link training failed: %d\n", err);
+
+	dev_dbg(sor->dev, "fast link training succeeded\n");
+
 	err = tegra_sor_power_up(sor, 250);
 	if (err < 0)
 		dev_err(sor->dev, "failed to power up SOR: %d\n", err);
@@ -1961,9 +1946,9 @@
 
 	/* production settings */
 	settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
-	if (IS_ERR(settings)) {
-		dev_err(sor->dev, "no settings for pixel clock %d Hz: %ld\n",
-			mode->clock * 1000, PTR_ERR(settings));
+	if (!settings) {
+		dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
+			mode->clock * 1000);
 		return;
 	}
 
@@ -2148,7 +2133,7 @@
 	int encoder = DRM_MODE_ENCODER_NONE;
 	int err;
 
-	if (!sor->dpaux) {
+	if (!sor->aux) {
 		if (sor->soc->supports_hdmi) {
 			connector = DRM_MODE_CONNECTOR_HDMIA;
 			encoder = DRM_MODE_ENCODER_TMDS;
@@ -2199,8 +2184,8 @@
 			dev_err(sor->dev, "debugfs setup failed: %d\n", err);
 	}
 
-	if (sor->dpaux) {
-		err = tegra_dpaux_attach(sor->dpaux, &sor->output);
+	if (sor->aux) {
+		err = drm_dp_aux_attach(sor->aux, &sor->output);
 		if (err < 0) {
 			dev_err(sor->dev, "failed to attach DP: %d\n", err);
 			return err;
@@ -2249,8 +2234,8 @@
 
 	tegra_output_exit(&sor->output);
 
-	if (sor->dpaux) {
-		err = tegra_dpaux_detach(sor->dpaux);
+	if (sor->aux) {
+		err = drm_dp_aux_detach(sor->aux);
 		if (err < 0) {
 			dev_err(sor->dev, "failed to detach DP: %d\n", err);
 			return err;
@@ -2399,14 +2384,14 @@
 
 	np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
 	if (np) {
-		sor->dpaux = tegra_dpaux_find_by_of_node(np);
+		sor->aux = drm_dp_aux_find_by_of_node(np);
 		of_node_put(np);
 
-		if (!sor->dpaux)
+		if (!sor->aux)
 			return -EPROBE_DEFER;
 	}
 
-	if (!sor->dpaux) {
+	if (!sor->aux) {
 		if (sor->soc->supports_hdmi) {
 			sor->ops = &tegra_sor_hdmi_ops;
 		} else if (sor->soc->supports_lvds) {
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 0110d95..4709b54 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -122,13 +122,13 @@
 	kfree(connector);
 }
 
-static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
 	.get_modes = udl_get_modes,
 	.mode_valid = udl_mode_valid,
 	.best_encoder = udl_best_single_encoder,
 };
 
-static struct drm_connector_funcs udl_connector_funcs = {
+static const struct drm_connector_funcs udl_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = udl_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 677190a6..160ef2a 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -400,7 +400,7 @@
 	udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
 }
 
-static struct drm_crtc_helper_funcs udl_helper_funcs = {
+static const struct drm_crtc_helper_funcs udl_helper_funcs = {
 	.dpms = udl_crtc_dpms,
 	.mode_fixup = udl_crtc_mode_fixup,
 	.mode_set = udl_crtc_mode_set,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index cbcbbb8..f1655ff 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -184,8 +184,6 @@
 	vc4->dev = drm;
 	drm->dev_private = vc4;
 
-	drm_dev_set_unique(drm, dev_name(dev));
-
 	vc4_bo_cache_init(drm);
 
 	drm_mode_config_init(drm);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9394c35..162f188 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -470,7 +470,7 @@
 }
 
 
-static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 	.destroy = vmw_framebuffer_surface_destroy,
 	.dirty = vmw_framebuffer_surface_dirty,
 };
@@ -647,7 +647,7 @@
 	return ret;
 }
 
-static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
 	.destroy = vmw_framebuffer_dmabuf_destroy,
 	.dirty = vmw_framebuffer_dmabuf_dirty,
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 2aff5e5..2def684 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -294,7 +294,7 @@
 	return vmw_ldu_commit_list(dev_priv);
 }
 
-static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
+static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
 	.cursor_set = vmw_du_crtc_cursor_set,
 	.cursor_move = vmw_du_crtc_cursor_move,
 	.gamma_set = vmw_du_crtc_gamma_set,
@@ -312,7 +312,7 @@
 	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
 }
 
-static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
+static const struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
 	.destroy = vmw_ldu_encoder_destroy,
 };
 
@@ -325,7 +325,7 @@
 	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
 }
 
-static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.detect = vmw_du_connector_detect,
 	.fill_modes = vmw_du_connector_fill_modes,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 6bb7af3..ecac70a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -530,7 +530,7 @@
 	return ret;
 }
 
-static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
+static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
 	.cursor_set = vmw_du_crtc_cursor_set,
 	.cursor_move = vmw_du_crtc_cursor_move,
 	.gamma_set = vmw_du_crtc_gamma_set,
@@ -548,7 +548,7 @@
 	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
 }
 
-static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
+static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
 	.destroy = vmw_sou_encoder_destroy,
 };
 
@@ -561,7 +561,7 @@
 	vmw_sou_destroy(vmw_connector_to_sou(connector));
 }
 
-static struct drm_connector_funcs vmw_sou_connector_funcs = {
+static const struct drm_connector_funcs vmw_sou_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.set_property = vmw_du_connector_set_property,
 	.destroy = vmw_sou_connector_destroy,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 45e72c2..87fc00a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1040,7 +1040,7 @@
 /*
  *  Screen Target CRTC dispatch table
  */
-static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
 	.cursor_set = vmw_du_crtc_cursor_set,
 	.cursor_move = vmw_du_crtc_cursor_move,
 	.gamma_set = vmw_du_crtc_gamma_set,
@@ -1070,7 +1070,7 @@
 	vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
 }
 
-static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+static const struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
 	.destroy = vmw_stdu_encoder_destroy,
 };
 
@@ -1097,7 +1097,7 @@
 
 
 
-static struct drm_connector_funcs vmw_stdu_connector_funcs = {
+static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.detect = vmw_du_connector_detect,
 	.fill_modes = vmw_du_connector_fill_modes,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7d620e8..c2a721a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -771,7 +771,7 @@
 	}
 	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
 			       GFP_KERNEL);
-	if (unlikely(srf->sizes == NULL)) {
+	if (unlikely(srf->offsets == NULL)) {
 		ret = -ENOMEM;
 		goto out_no_offsets;
 	}
@@ -815,11 +815,8 @@
 	    srf->sizes[0].height == 64 &&
 	    srf->format == SVGA3D_A8R8G8B8) {
 
-		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
-		/* clear the image */
-		if (srf->snooper.image) {
-			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
-		} else {
+		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+		if (!srf->snooper.image) {
 			DRM_ERROR("Failed to allocate cursor_image\n");
 			ret = -ENOMEM;
 			goto out_no_copy;
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index c1189f0..a1d9974 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -10,6 +10,7 @@
 	mipi.o \
 	hw/host1x01.o \
 	hw/host1x02.o \
-	hw/host1x04.o
+	hw/host1x04.o \
+	hw/host1x05.o
 
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 4a99c64..da462af 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -538,6 +538,8 @@
 
 void host1x_driver_unregister(struct host1x_driver *driver)
 {
+	driver_unregister(&driver->driver);
+
 	mutex_lock(&drivers_lock);
 	list_del_init(&driver->list);
 	mutex_unlock(&drivers_lock);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 53d3d1d..314bf37 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -35,6 +35,7 @@
 #include "hw/host1x01.h"
 #include "hw/host1x02.h"
 #include "hw/host1x04.h"
+#include "hw/host1x05.h"
 
 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ -87,7 +88,17 @@
 	.sync_offset = 0x2100,
 };
 
+static const struct host1x_info host1x05_info = {
+	.nb_channels = 14,
+	.nb_pts = 192,
+	.nb_mlocks = 16,
+	.nb_bases = 64,
+	.init = host1x05_init,
+	.sync_offset = 0x2100,
+};
+
 static struct of_device_id host1x_of_match[] = {
+	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
 	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
 	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
 	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
@@ -212,6 +223,11 @@
 	.remove = host1x_remove,
 };
 
+static struct platform_driver * const drivers[] = {
+	&tegra_host1x_driver,
+	&tegra_mipi_driver,
+};
+
 static int __init tegra_host1x_init(void)
 {
 	int err;
@@ -220,28 +236,17 @@
 	if (err < 0)
 		return err;
 
-	err = platform_driver_register(&tegra_host1x_driver);
+	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
 	if (err < 0)
-		goto unregister_bus;
+		bus_unregister(&host1x_bus_type);
 
-	err = platform_driver_register(&tegra_mipi_driver);
-	if (err < 0)
-		goto unregister_host1x;
-
-	return 0;
-
-unregister_host1x:
-	platform_driver_unregister(&tegra_host1x_driver);
-unregister_bus:
-	bus_unregister(&host1x_bus_type);
 	return err;
 }
 module_init(tegra_host1x_init);
 
 static void __exit tegra_host1x_exit(void)
 {
-	platform_driver_unregister(&tegra_mipi_driver);
-	platform_driver_unregister(&tegra_host1x_driver);
+	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
 	bus_unregister(&host1x_bus_type);
 }
 module_exit(tegra_host1x_exit);
diff --git a/drivers/gpu/host1x/hw/host1x05.c b/drivers/gpu/host1x/hw/host1x05.c
new file mode 100644
index 0000000..047097c
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x05.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra210 SoCs
+ *
+ * Copyright (c) 2015 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x05.h"
+#include "host1x05_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x05_init(struct host1x *host)
+{
+	host->channel_op = &host1x_channel_ops;
+	host->cdma_op = &host1x_cdma_ops;
+	host->cdma_pb_op = &host1x_pushbuffer_ops;
+	host->syncpt_op = &host1x_syncpt_ops;
+	host->intr_op = &host1x_intr_ops;
+	host->debug_op = &host1x_debug_ops;
+
+	return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x05.h b/drivers/gpu/host1x/hw/host1x05.h
new file mode 100644
index 0000000..a306d9c
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x05.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra210 SoCs
+ *
+ * Copyright (c) 2015 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X05_H
+#define HOST1X_HOST1X05_H
+
+struct host1x;
+
+int host1x05_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x05_hardware.h b/drivers/gpu/host1x/hw/host1x05_hardware.h
new file mode 100644
index 0000000..2937ebb
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x05_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra210
+ *
+ * Copyright (c) 2015 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X05_HARDWARE_H
+#define __HOST1X_HOST1X05_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x05_channel.h"
+#include "hw_host1x05_sync.h"
+#include "hw_host1x05_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_wait_syncpt_indx_f(indx)
+		| host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+		| host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+	unsigned indx, unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_wait_syncpt_base_indx_f(indx)
+		| host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+	unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+	unsigned cond, unsigned indx)
+{
+	return host1x_uclass_incr_syncpt_cond_f(cond)
+		| host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indbe_f(0xf)
+		| host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset);
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset)
+		| host1x_uclass_indoff_rwn_read_v();
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+	unsigned class_id, unsigned offset, unsigned mask)
+{
+	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+	return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+	return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+	return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+	return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+	return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+		host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+	return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+	return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
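
The inline helpers above encode host1x CDMA opcodes into 32-bit command
words. A hypothetical command-stream fragment, purely for illustration
(the class and register numbers are made up):

	u32 stream[] = {
		/* switch the channel to class 0x01 */
		host1x_opcode_setclass(0x01, 0, 0),
		/* write 2 consecutive registers starting at offset 0x100 */
		host1x_opcode_incr(0x100, 2),
		0x00000001, 0x00000002,
		HOST1X_OPCODE_NOP,
	};
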
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_channel.h b/drivers/gpu/host1x/hw/hw_host1x05_channel.h
new file mode 100644
index 0000000..fce6e2c
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x05_channel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X05_CHANNEL_H
+#define HOST1X_HW_HOST1X05_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+	host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+	return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+	host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+	return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+	host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+	return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+	host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+	return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+	host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+	return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+	host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+	return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+	host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+	return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+	host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+	host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+	return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+	host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+	return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+	host1x_channel_dmactrl_dmainitget()
+
+#endif
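
Putting the generated accessors above to use, a hypothetical status check
(host1x_ch_readl() is assumed to be the channel MMIO read helper from
../dev.h):

	u32 stat = host1x_ch_readl(channel, HOST1X_CHANNEL_FIFOSTAT);

	if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(stat)) {
		/* the channel's command FIFO has drained */
	}
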
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_sync.h b/drivers/gpu/host1x/hw/hw_host1x05_sync.h
new file mode 100644
index 0000000..ca10eee
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x05_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X05_SYNC_H
+#define HOST1X_HW_HOST1X05_SYNC_H
+
+#define REGISTER_STRIDE	4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+	return 0xf80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+	host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+	return 0xe80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+	host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+	return 0xf00 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+	host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+	return 0xf20 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+	host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+	return 0xc00 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+	host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+	return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+	host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+	host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+	return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+	host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+	return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+	host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+	return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+	host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+	return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+	host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+	return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+	host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+	return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+	host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_v(u32 r)
+{
+	return (r >> 8) & 0xf;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_V(v) \
+	host1x_sync_mlock_owner_chid_v(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+	host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+	host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+	return 0x1380 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+	host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+	return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+	host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+	return 0xf60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+	host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+	return 0xc80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+	host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+	return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+	host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+	return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+	host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+	return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+	host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+	return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+	host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+	return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+	host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+	return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+	host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+	return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+	return 0xcc0 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+	host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+	return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+	host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+	host1x_sync_cbstat_cbclass_v(r)
+
+#endif
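
(A sketch of the _F/_V pairing described above: peeking one word out of a
channel's command FIFO. host1x_sync_readl()/host1x_sync_writel() are assumed
driver-local MMIO helpers, not part of this header.)

static u32 example_cfpeek(struct host1x *host, unsigned int channel, u32 addr)
{
	/* Compose the control word from its shifted fields... */
	host1x_sync_writel(host,
			   HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
			   HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(channel) |
			   HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(addr),
			   HOST1X_SYNC_CFPEEK_CTRL);

	/* ...then read the selected FIFO entry back. */
	return host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
}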
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_uclass.h b/drivers/gpu/host1x/hw/hw_host1x05_uclass.h
new file mode 100644
index 0000000..0c411da
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x05_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X05_UCLASS_H
+#define HOST1X_HW_HOST1X05_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+	host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+	return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+	host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+	host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+	return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+	host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+	host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+	host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+	return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+	host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+	return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+	host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+	host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+	return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+	host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+	return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+	host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+	return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+	host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+	return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+	host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+	return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+	host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+	return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+	host1x_uclass_indoff_rwn_read_v()
+
+#endif
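
(The _F helpers in this header pack method payloads for the host1x command
stream rather than MMIO values. A hedged sketch of queueing a syncpoint wait;
example_wait_opcode() is a hypothetical stand-in for the driver's opcode
builder, while host1x_cdma_push() is the existing cdma helper.)

static void example_push_syncpt_wait(struct host1x_cdma *cdma,
				     unsigned int id, u32 thresh)
{
	/* Syncpoint index in bits 31:24, threshold in bits 23:0. */
	u32 payload = host1x_uclass_wait_syncpt_indx_f(id) |
		      host1x_uclass_wait_syncpt_thresh_f(thresh);

	host1x_cdma_push(cdma, example_wait_opcode(HOST1X_UCLASS_WAIT_SYNCPT),
			 payload);
}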
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5531d7b..04caa8f 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1068,7 +1068,7 @@
 void drm_dev_unref(struct drm_device *dev);
 int drm_dev_register(struct drm_device *dev, unsigned long flags);
 void drm_dev_unregister(struct drm_device *dev);
-int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...);
+int drm_dev_set_unique(struct drm_device *dev, const char *name);
 
 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
 void drm_minor_release(struct drm_minor *minor);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c2f98ba..3b040b3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -172,7 +172,9 @@
 	 * Clean up framebuffer resources, specifically also unreference the
 	 * backing storage. The core guarantees to call this function for every
 	 * framebuffer successfully created by ->fb_create() in
-	 * &drm_mode_config_funcs.
+	 * &drm_mode_config_funcs. Drivers must also call
+	 * drm_framebuffer_cleanup() to release DRM core resources for this
+	 * framebuffer.
 	 */
 	void (*destroy)(struct drm_framebuffer *framebuffer);
 
@@ -187,6 +189,9 @@
 	 * copying the current screen contents to a private buffer and blending
 	 * between that and the new contents.
 	 *
+	 * GEM-based drivers should call drm_gem_handle_create() to create the
+	 * handle.
+	 *
 	 * RETURNS:
 	 *
 	 * 0 on success or a negative error code on failure.
@@ -1731,6 +1736,17 @@
 	 * requested metadata, but most of that is left to the driver. See
 	 * struct &drm_mode_fb_cmd2 for details.
 	 *
+	 * If the parameters are deemed valid and the backing storage objects in
+	 * the underlying memory manager all exist, then the driver allocates
+	 * a new &drm_framebuffer structure, subclassed to contain
+	 * driver-specific information (like the internal native buffer object
+	 * references). It also needs to fill out all relevant metadata, which
+	 * should be done by calling drm_helper_mode_fill_fb_struct().
+	 *
+	 * The initialization is finalized by calling drm_framebuffer_init(),
+	 * which registers the framebuffer and makes it accessible to other
+	 * threads.
+	 *
 	 * RETURNS:
 	 *
 	 * A new framebuffer with an initial reference count of 1 or a negative
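
(A hedged sketch of the flow described above, for a fictional driver that
embeds &drm_framebuffer; my_validate_mode_cmd() and my_fb_funcs are
assumptions, and error paths are trimmed to the essentials.)

struct my_framebuffer {
	struct drm_framebuffer base;
	/* driver-specific buffer object references would live here */
};

static struct drm_framebuffer *
my_fb_create(struct drm_device *dev, struct drm_file *file_priv,
	     struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct my_framebuffer *myfb;
	int ret;

	ret = my_validate_mode_cmd(dev, mode_cmd); /* sizes, formats, pitches */
	if (ret)
		return ERR_PTR(ret);

	myfb = kzalloc(sizeof(*myfb), GFP_KERNEL);
	if (!myfb)
		return ERR_PTR(-ENOMEM);

	/* Fill width/height/pitches/offsets/pixel_format from mode_cmd. */
	drm_helper_mode_fill_fb_struct(&myfb->base, mode_cmd);

	/* Publishes the framebuffer; must be the last initialization step. */
	ret = drm_framebuffer_init(dev, &myfb->base, &my_fb_funcs);
	if (ret) {
		kfree(myfb);
		return ERR_PTR(ret);
	}

	return &myfb->base;
}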
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index 8b9cc36..82cdf61 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -95,7 +95,7 @@
 struct drm_encoder_slave {
 	struct drm_encoder base;
 
-	struct drm_encoder_slave_funcs *slave_funcs;
+	const struct drm_encoder_slave_funcs *slave_funcs;
 	void *slave_priv;
 	void *bus_priv;
 };
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index f1d8d0d..1b3b1f8 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -163,9 +163,36 @@
 	return container_of(dev, struct mipi_dsi_device, dev);
 }
 
+/**
+ * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
+ *                                given pixel format defined by the MIPI DSI
+ *                                specification
+ * @fmt: MIPI DSI pixel format
+ *
+ * Returns: The number of bits per pixel of the given pixel format.
+ */
+static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt)
+{
+	switch (fmt) {
+	case MIPI_DSI_FMT_RGB888:
+	case MIPI_DSI_FMT_RGB666:
+		return 24;
+
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		return 18;
+
+	case MIPI_DSI_FMT_RGB565:
+		return 16;
+	}
+
+	return -EINVAL;
+}
+
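(One hedged example of where this helper is useful: deriving the per-lane HS
bit rate in a DSI host driver. The function name is illustrative; format and
lanes are real fields of struct mipi_dsi_device.)

static u64 example_dsi_hs_bit_rate(const struct mipi_dsi_device *dsi,
				   unsigned long pixel_clock_hz)
{
	int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);

	if (bpp < 0)
		return 0; /* unrecognized pixel format */

	/* Total pixel bandwidth, spread across the configured lanes. */
	return (u64)pixel_clock_hz * bpp / dsi->lanes;
}
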
 struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
 int mipi_dsi_attach(struct mipi_dsi_device *dsi);
 int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
+int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
 int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
 					    u16 value);
 
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index f1a113e..f970209 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -279,12 +279,19 @@
 #define INTEL_SKL_GT3_IDS(info) \
 	INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
-	INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
+	INTEL_VGA_DEVICE(0x192A, info)  /* SRV GT3 */
 
-#define INTEL_SKL_IDS(info) \
+#define INTEL_SKL_GT4_IDS(info) \
+	INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
+	INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
+	INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
+	INTEL_VGA_DEVICE(0x193A, info)  /* SRV GT4 */
+
+#define INTEL_SKL_IDS(info)	 \
 	INTEL_SKL_GT1_IDS(info), \
 	INTEL_SKL_GT2_IDS(info), \
-	INTEL_SKL_GT3_IDS(info)
+	INTEL_SKL_GT3_IDS(info), \
+	INTEL_SKL_GT4_IDS(info)
 
 #define INTEL_BXT_IDS(info) \
 	INTEL_VGA_DEVICE(0x0A84, info), \
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 813042c..3d4bf08 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -826,10 +826,10 @@
  * reserved, the validation sequence is checked against the validation
  * sequence of the process currently reserving the buffer,
  * and if the current validation sequence is greater than that of the process
- * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
+ * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
  * waiting for the buffer to become unreserved, after which it retries
  * reserving.
- * The caller should, when receiving an -EAGAIN error
+ * The caller should, when receiving an -EDEADLK error
  * release all its buffer reservations, wait for @bo to become unreserved, and
  * then rerun the validation with the same validation sequence. This procedure
  * will always guarantee that the process with the lowest validation sequence
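
(In sketch form, the caller-side protocol this comment describes. struct
example_val_entry and example_unreserve_upto() are assumptions for the sake of
the sketch; ttm_bo_reserve() is the real entry point.)

struct example_val_entry {
	struct list_head head;
	struct ttm_buffer_object *bo;
};

static int example_reserve_all(struct list_head *list,
			       struct ww_acquire_ctx *ticket)
{
	struct example_val_entry *entry;
	int ret;

retry:
	list_for_each_entry(entry, list, head) {
		ret = ttm_bo_reserve(entry->bo, true, false, true, ticket);
		if (ret == -EDEADLK) {
			/* Back off: release every reservation taken so far, */
			example_unreserve_upto(list, entry);
			/* then rerun with the same validation sequence. */
			goto retry;
		}
		if (ret)
			return ret;
	}

	return 0;
}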
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
new file mode 100644
index 0000000..4cc989a
--- /dev/null
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_DRM_H__
+#define __ETNAVIV_DRM_H__
+
+#include "drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints:
+ *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
+ *     user/kernel compatibility
+ *  2) Keep fields aligned to their size
+ *  3) Because of how drm_ioctl() works, we can add new fields at
+ *     the end of an ioctl if some care is taken: drm_ioctl() will
+ *     zero out the new fields at the tail of the ioctl, so a zero
+ *     value should have a backwards compatible meaning.  And for
+ *     output params, userspace built against the older struct simply
+ *     won't see the newly added output fields, so they must be safe
+ *     to ignore.
+ */
+
+/* Timeouts are specified in clock-monotonic absolute times (to simplify
+ * restarting interrupted ioctls).  The following struct is logically the
+ * same as 'struct timespec' but 32/64-bit ABI safe.
+ */
+struct drm_etnaviv_timespec {
+	__s64 tv_sec;          /* seconds */
+	__s64 tv_nsec;         /* nanoseconds */
+};
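
(Userspace therefore converts a relative timeout to an absolute
CLOCK_MONOTONIC time before each ioctl; a minimal sketch, with the helper
name illustrative.)

#include <time.h>

static void example_abs_timeout(struct drm_etnaviv_timespec *out,
				unsigned long long ns_from_now)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	out->tv_sec = now.tv_sec + ns_from_now / 1000000000ull;
	out->tv_nsec = now.tv_nsec + ns_from_now % 1000000000ull;
	if (out->tv_nsec >= 1000000000) {	/* normalize carry */
		out->tv_sec++;
		out->tv_nsec -= 1000000000;
	}
}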
+
+#define ETNAVIV_PARAM_GPU_MODEL                     0x01
+#define ETNAVIV_PARAM_GPU_REVISION                  0x02
+#define ETNAVIV_PARAM_GPU_FEATURES_0                0x03
+#define ETNAVIV_PARAM_GPU_FEATURES_1                0x04
+#define ETNAVIV_PARAM_GPU_FEATURES_2                0x05
+#define ETNAVIV_PARAM_GPU_FEATURES_3                0x06
+#define ETNAVIV_PARAM_GPU_FEATURES_4                0x07
+
+#define ETNAVIV_PARAM_GPU_STREAM_COUNT              0x10
+#define ETNAVIV_PARAM_GPU_REGISTER_MAX              0x11
+#define ETNAVIV_PARAM_GPU_THREAD_COUNT              0x12
+#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE         0x13
+#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT         0x14
+#define ETNAVIV_PARAM_GPU_PIXEL_PIPES               0x15
+#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
+#define ETNAVIV_PARAM_GPU_BUFFER_SIZE               0x17
+#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT         0x18
+#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS             0x19
+
+#define ETNA_MAX_PIPES 4
+
+struct drm_etnaviv_param {
+	__u32 pipe;           /* in */
+	__u32 param;          /* in, ETNAVIV_PARAM_x */
+	__u64 value;          /* out (get_param) or in (set_param) */
+};
+
+/*
+ * GEM buffers:
+ */
+
+#define ETNA_BO_CACHE_MASK   0x000f0000
+/* cache modes */
+#define ETNA_BO_CACHED       0x00010000
+#define ETNA_BO_WC           0x00020000
+#define ETNA_BO_UNCACHED     0x00040000
+/* map flags */
+#define ETNA_BO_FORCE_MMU    0x00100000
+
+struct drm_etnaviv_gem_new {
+	__u64 size;           /* in */
+	__u32 flags;          /* in, mask of ETNA_BO_x */
+	__u32 handle;         /* out */
+};
+
+struct drm_etnaviv_gem_info {
+	__u32 handle;         /* in */
+	__u32 pad;
+	__u64 offset;         /* out, offset to pass to mmap() */
+};
+
+#define ETNA_PREP_READ        0x01
+#define ETNA_PREP_WRITE       0x02
+#define ETNA_PREP_NOSYNC      0x04
+
+struct drm_etnaviv_gem_cpu_prep {
+	__u32 handle;         /* in */
+	__u32 op;             /* in, mask of ETNA_PREP_x */
+	struct drm_etnaviv_timespec timeout;   /* in */
+};
+
+struct drm_etnaviv_gem_cpu_fini {
+	__u32 handle;         /* in */
+	__u32 flags;          /* in, placeholder for now, no defined values */
+};
+
+/*
+ * Cmdstream Submission:
+ */
+
+/* The value written into the cmdstream is logically:
+ * relocbuf->gpuaddr + reloc_offset
+ *
+ * NOTE that relocs must be sorted in order of increasing submit_offset;
+ * otherwise the kernel returns -EINVAL.
+ */
+struct drm_etnaviv_gem_submit_reloc {
+	__u32 submit_offset;  /* in, offset from submit_bo */
+	__u32 reloc_idx;      /* in, index of reloc_bo buffer */
+	__u64 reloc_offset;   /* in, offset from start of reloc_bo */
+	__u32 flags;          /* in, placeholder for now, no defined values */
+};
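
(A hedged sketch of one reloc entry: patch the word at byte offset 8 of the
cmdstream with the GPU address of bos[1] plus 0x100; all values are
illustrative.)

	struct drm_etnaviv_gem_submit_reloc reloc = {
		.submit_offset = 8,     /* which cmdstream word to patch */
		.reloc_idx = 1,         /* index into the submit bos[] table */
		.reloc_offset = 0x100,  /* added to the bo's GPU address */
		.flags = 0,
	};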
+
+/* Each buffer referenced elsewhere in the cmdstream submit (i.e. the
+ * cmdstream buffer(s) themselves or reloc entries) has one (and only
+ * one) entry in the submit->bos[] table.
+ *
+ * As an optimization, the current buffer (GPU virtual address) can be
+ * passed back through the 'presumed' field.  If on a subsequent reloc,
+ * userspace passes back a 'presumed' address that is still valid,
+ * then patching the cmdstream for this entry is skipped.  This can
+ * avoid the kernel needing to map/access the cmdstream bo in the
+ * common case.
+ */
+#define ETNA_SUBMIT_BO_READ             0x0001
+#define ETNA_SUBMIT_BO_WRITE            0x0002
+struct drm_etnaviv_gem_submit_bo {
+	__u32 flags;          /* in, mask of ETNA_SUBMIT_BO_x */
+	__u32 handle;         /* in, GEM handle */
+	__u64 presumed;       /* in/out, presumed buffer address */
+};
+
+/* Each cmdstream submit consists of a table of the buffers involved and
+ * one or more cmdstream buffers.  This allows for conditional execution
+ * (context-restore) and for the IB buffers needed for per-tile/bin draw
+ * cmds.
+ */
+#define ETNA_PIPE_3D      0x00
+#define ETNA_PIPE_2D      0x01
+#define ETNA_PIPE_VG      0x02
+struct drm_etnaviv_gem_submit {
+	__u32 fence;          /* out */
+	__u32 pipe;           /* in */
+	__u32 exec_state;     /* in, initial execution state (ETNA_PIPE_x) */
+	__u32 nr_bos;         /* in, number of submit_bo's */
+	__u32 nr_relocs;      /* in, number of submit_reloc's */
+	__u32 stream_size;    /* in, cmdstream size */
+	__u64 bos;            /* in, ptr to array of submit_bo's */
+	__u64 relocs;         /* in, ptr to array of submit_reloc's */
+	__u64 stream;         /* in, ptr to cmdstream */
+};
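
(Putting the pieces together, a minimal userspace sketch of a submission.
drmIoctl() comes from libdrm; fd, the bos[]/relocs[] arrays and the stream
buffer are assumed to be set up as described above, and <stdint.h> provides
uintptr_t.)

	struct drm_etnaviv_gem_submit req = {
		.pipe = ETNA_PIPE_3D,
		.exec_state = ETNA_PIPE_3D,
		.nr_bos = nr_bos,
		.nr_relocs = nr_relocs,
		.stream_size = stream_size,
		/* pointers travel as __u64, per the compatibility rules above */
		.bos = (__u64)(uintptr_t)bos,
		.relocs = (__u64)(uintptr_t)relocs,
		.stream = (__u64)(uintptr_t)stream,
	};

	if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &req) == 0)
		last_fence = req.fence; /* out param: fence to wait on */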
+
+/* The normal way to synchronize with the GPU is just to CPU_PREP on
+ * a buffer if you need to access it from the CPU (other cmdstream
+ * submission from the same or other contexts, the PAGE_FLIP ioctl, etc.
+ * all handle the required synchronization under the hood).  This ioctl
+ * mainly exists as a way to implement the gallium pipe_fence
+ * APIs without requiring a dummy bo to synchronize on.
+ */
+#define ETNA_WAIT_NONBLOCK      0x01
+struct drm_etnaviv_wait_fence {
+	__u32 pipe;           /* in */
+	__u32 fence;          /* in */
+	__u32 flags;          /* in, mask of ETNA_WAIT_x */
+	__u32 pad;
+	struct drm_etnaviv_timespec timeout;   /* in */
+};
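
(And the matching wait, reusing the absolute-timeout helper sketched earlier;
the one-second timeout is illustrative.)

	struct drm_etnaviv_wait_fence wait = {
		.pipe = ETNA_PIPE_3D,
		.fence = last_fence,
		.flags = 0, /* blocking; pass ETNA_WAIT_NONBLOCK to poll */
	};

	example_abs_timeout(&wait.timeout, 1000000000ull); /* 1s from now */
	drmIoctl(fd, DRM_IOCTL_ETNAVIV_WAIT_FENCE, &wait);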
+
+#define ETNA_USERPTR_READ	0x01
+#define ETNA_USERPTR_WRITE	0x02
+struct drm_etnaviv_gem_userptr {
+	__u64 user_ptr;       /* in, page-aligned user pointer */
+	__u64 user_size;      /* in, page-aligned user size */
+	__u32 flags;          /* in, mask of ETNA_USERPTR_x */
+	__u32 handle;         /* out, non-zero handle */
+};
+
+struct drm_etnaviv_gem_wait {
+	__u32 pipe;				/* in */
+	__u32 handle;				/* in, bo to be waited for */
+	__u32 flags;				/* in, mask of ETNA_WAIT_x  */
+	__u32 pad;
+	struct drm_etnaviv_timespec timeout;	/* in */
+};
+
+#define DRM_ETNAVIV_GET_PARAM          0x00
+/* placeholder:
+#define DRM_ETNAVIV_SET_PARAM          0x01
+ */
+#define DRM_ETNAVIV_GEM_NEW            0x02
+#define DRM_ETNAVIV_GEM_INFO           0x03
+#define DRM_ETNAVIV_GEM_CPU_PREP       0x04
+#define DRM_ETNAVIV_GEM_CPU_FINI       0x05
+#define DRM_ETNAVIV_GEM_SUBMIT         0x06
+#define DRM_ETNAVIV_WAIT_FENCE         0x07
+#define DRM_ETNAVIV_GEM_USERPTR        0x08
+#define DRM_ETNAVIV_GEM_WAIT           0x09
+#define DRM_ETNAVIV_NUM_IOCTLS         0x0a
+
+#define DRM_IOCTL_ETNAVIV_GET_PARAM    DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
+#define DRM_IOCTL_ETNAVIV_GEM_NEW      DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
+#define DRM_IOCTL_ETNAVIV_GEM_INFO     DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
+#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
+#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
+#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
+#define DRM_IOCTL_ETNAVIV_WAIT_FENCE   DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
+#define DRM_IOCTL_ETNAVIV_GEM_USERPTR  DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
+#define DRM_IOCTL_ETNAVIV_GEM_WAIT     DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
+
+#endif /* __ETNAVIV_DRM_H__ */