Merge tag 'v3.16' into drm-next

Linux 3.16

backmerge requested by i915, nouveau and radeon authors

Conflicts:
	drivers/gpu/drm/i915/i915_gem_render_state.c
	drivers/gpu/drm/i915/intel_drv.h
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7df3134..1d3756d 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1610,7 +1610,7 @@
           The connector is then registered with a call to
           <function>drm_connector_init</function> with a pointer to the connector
           functions and a connector type, and exposed through sysfs with a call to
-          <function>drm_sysfs_connector_add</function>.
+          <function>drm_connector_register</function>.
         </para>
         <para>
           Supported connector types are
@@ -1768,7 +1768,7 @@
 	(<function>drm_encoder_cleanup</function>) and connectors
 	(<function>drm_connector_cleanup</function>). Furthermore, connectors
 	that have been added to sysfs must be removed by a call to
-	<function>drm_sysfs_connector_remove</function> before calling
+	<function>drm_connector_unregister</function> before calling
 	<function>drm_connector_cleanup</function>.
       </para>
       <para>
@@ -1813,7 +1813,7 @@
 	drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 }]]></programlisting>
       <para>
         In the example above (taken from the i915 driver), a CRTC, connector and
@@ -2338,6 +2338,12 @@
 !Edrivers/gpu/drm/drm_dp_helper.c
     </sect2>
     <sect2>
+      <title>Display Port MST Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
+!Iinclude/drm/drm_dp_mst_helper.h
+!Edrivers/gpu/drm/drm_dp_mst_topology.c
+    </sect2>
+    <sect2>
       <title>EDID Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_edid.c
     </sect2>
@@ -2502,7 +2508,7 @@
 	<td valign="top" >Description/Restrictions</td>
 	</tr>
 	<tr>
-	<td rowspan="20" valign="top" >DRM</td>
+	<td rowspan="21" valign="top" >DRM</td>
 	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >“EDID”</td>
 	<td valign="top" >BLOB | IMMUTABLE</td>
@@ -2633,7 +2639,7 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="2" valign="top" >Optional</td>
+	<td rowspan="3" valign="top" >Optional</td>
 	<td valign="top" >“scaling mode”</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
@@ -2641,6 +2647,15 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
+	<td valign="top" >"aspect ratio"</td>
+	<td valign="top" >ENUM</td>
+	<td valign="top" >{ "None", "4:3", "16:9" }</td>
+	<td valign="top" >Connector</td>
+	<td valign="top" >DRM property to set aspect ratio from user space app.
+		This enum is made generic to allow addition of custom aspect
+		ratios.</td>
+	</tr>
+	<tr>
 	<td valign="top" >“dirty”</td>
 	<td valign="top" >ENUM | IMMUTABLE</td>
 	<td valign="top" >{ "Off", "On", "Annotate" }</td>
@@ -2649,7 +2664,7 @@
 	</tr>
 	<tr>
 	<td rowspan="21" valign="top" >i915</td>
-	<td rowspan="3" valign="top" >Generic</td>
+	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >"Broadcast RGB"</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
@@ -2664,10 +2679,11 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
+	<td rowspan="1" valign="top" >Plane</td>
+	<td valign="top" >“rotation”</td>
+	<td valign="top" >BITMASK</td>
+	<td valign="top" >{ 0, "rotate-0" }, { 2, "rotate-180" }</td>
+	<td valign="top" >Plane</td>
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
@@ -2799,8 +2815,8 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="3" valign="top" >CDV gma-500</td>
-	<td rowspan="3" valign="top" >Generic</td>
+	<td rowspan="2" valign="top" >CDV gma-500</td>
+	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >"Broadcast RGB"</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ “Full”, “Limited 16:235” }</td>
@@ -2815,15 +2831,8 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
-	<td rowspan="20" valign="top" >Poulsbo</td>
-	<td rowspan="2" valign="top" >Generic</td>
+	<td rowspan="19" valign="top" >Poulsbo</td>
+	<td rowspan="1" valign="top" >Generic</td>
 	<td valign="top" >“backlight”</td>
 	<td valign="top" >RANGE</td>
 	<td valign="top" >Min=0, Max=100</td>
@@ -2831,13 +2840,6 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
 	<td rowspan="17" valign="top" >SDVO-TV</td>
 	<td valign="top" >“mode”</td>
 	<td valign="top" >ENUM</td>
@@ -3064,7 +3066,7 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="3" valign="top" >i2c/ch7006_drv</td>
+	<td rowspan="2" valign="top" >i2c/ch7006_drv</td>
 	<td valign="top" >Generic</td>
 	<td valign="top" >“scale”</td>
 	<td valign="top" >RANGE</td>
@@ -3073,14 +3075,7 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="2" valign="top" >TV</td>
-	<td valign="top" >Standard names as in DRM</td>
-	<td valign="top" >Standard types as in DRM</td>
-	<td valign="top" >Standard Values as in DRM</td>
-	<td valign="top" >Standard object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
+	<td rowspan="1" valign="top" >TV</td>
 	<td valign="top" >“mode”</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc"
@@ -3089,7 +3084,7 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="16" valign="top" >nouveau</td>
+	<td rowspan="15" valign="top" >nouveau</td>
 	<td rowspan="6" valign="top" >NV10 Overlay</td>
 	<td valign="top" >"colorkey"</td>
 	<td valign="top" >RANGE</td>
@@ -3198,14 +3193,6 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Generic</td>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
 	<td rowspan="2" valign="top" >omap</td>
 	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >“rotation”</td>
@@ -3236,7 +3223,7 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="10" valign="top" >radeon</td>
+	<td rowspan="9" valign="top" >radeon</td>
 	<td valign="top" >DVI-I</td>
 	<td valign="top" >“coherent”</td>
 	<td valign="top" >RANGE</td>
@@ -3308,14 +3295,6 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Generic</td>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
 	<td rowspan="3" valign="top" >rcar-du</td>
 	<td rowspan="3" valign="top" >Generic</td>
 	<td valign="top" >"alpha"</td>
diff --git a/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt b/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt
new file mode 100644
index 0000000..46525ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt
@@ -0,0 +1,30 @@
+Device Tree bindings for Armada DRM CRTC driver
+
+Required properties:
+ - compatible: value should be "marvell,dove-lcd".
+ - reg: base address and size of the LCD controller
+ - interrupts: single interrupt number for the LCD controller
+ - port: video output port with endpoints, as described by graph.txt
+
+Optional properties:
+
+ - clocks: as described by clock-bindings.txt
+ - clock-names: as described by clock-bindings.txt
+	"axiclk" - axi bus clock for pixel clock
+	"plldivider" - pll divider clock for pixel clock
+	"ext_ref_clk0" - external clock 0 for pixel clock
+	"ext_ref_clk1" - external clock 1 for pixel clock
+
+Note: all clocks are optional but at least one must be specified.
+Further clocks may be added in the future according to requirements of
+different SoCs.
+
+Example:
+
+	lcd0: lcd-controller@820000 {
+		compatible = "marvell,dove-lcd";
+		reg = <0x820000 0x1000>;
+		interrupts = <47>;
+		clocks = <&si5351 0>;
+		clock-names = "ext_ref_clk_1";
+	};
diff --git a/Documentation/devicetree/bindings/video/exynos_dsim.txt b/Documentation/devicetree/bindings/video/exynos_dsim.txt
index 33b5730..31036c6 100644
--- a/Documentation/devicetree/bindings/video/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dsim.txt
@@ -1,7 +1,9 @@
 Exynos MIPI DSI Master
 
 Required properties:
-  - compatible: "samsung,exynos4210-mipi-dsi"
+  - compatible: value should be one of the following
+		"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
+		"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
   - reg: physical base address and length of the registers set for the device
   - interrupts: should contain DSI interrupt
   - clocks: list of clock specifiers, must contain an entry for each required
diff --git a/Documentation/devicetree/bindings/video/exynos_mixer.txt b/Documentation/devicetree/bindings/video/exynos_mixer.txt
index 7bfde9c..08b394b 100644
--- a/Documentation/devicetree/bindings/video/exynos_mixer.txt
+++ b/Documentation/devicetree/bindings/video/exynos_mixer.txt
@@ -4,8 +4,9 @@
 - compatible: value should be one of the following:
 	1) "samsung,exynos5-mixer" <DEPRECATED>
 	2) "samsung,exynos4210-mixer"
-	3) "samsung,exynos5250-mixer"
-	4) "samsung,exynos5420-mixer"
+	3) "samsung,exynos4212-mixer"
+	4) "samsung,exynos5250-mixer"
+	5) "samsung,exynos5420-mixer"
 
 - reg: physical base address of the mixer and length of memory mapped
 	region.
diff --git a/Documentation/devicetree/bindings/video/samsung-fimd.txt b/Documentation/devicetree/bindings/video/samsung-fimd.txt
index 2dad41b..8428fcf 100644
--- a/Documentation/devicetree/bindings/video/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/video/samsung-fimd.txt
@@ -44,6 +44,34 @@
 - display-timings: timing settings for FIMD, as described in document [1].
 		Can be used in case timings cannot be provided otherwise
 		or to override timings provided by the panel.
+- samsung,sysreg: phandle to the syscon node used to control the system registers
+- i80-if-timings: timing configuration for lcd i80 interface support.
+  - cs-setup: clock cycles between the activation of the address signal and
+              the assertion of the chip select signal.
+              If not specified, the default value (0) is used.
+  - wr-setup: clock cycles between the assertion of the chip select signal
+              and the assertion of the write signal.
+              If not specified, the default value (0) is used.
+  - wr-active: clock cycles for which the write signal stays asserted.
+               If not specified, the default value (1) is used.
+  - wr-hold: clock cycles for which chip select stays asserted after the
+             write signal is deasserted.
+             If not specified, the default value (0) is used.
+
+  The parameters are defined as:
+
+    VCLK(internal)  __|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯
+                      :            :            :            :            :
+    Address Output  --:<XXXXXXXXXXX:XXXXXXXXXXXX:XXXXXXXXXXXX:XXXXXXXXXXXX:XX
+                      | cs-setup+1 |            :            :            :
+                      |<---------->|            :            :            :
+    Chip Select     ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|____________:____________:____________|¯¯
+                                   | wr-setup+1 |            | wr-hold+1  |
+                                   |<---------->|            |<---------->|
+    Write Enable    ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|____________|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯
+                                                | wr-active+1|
+                                                |<---------->|
+    Video Data      ----------------------------<XXXXXXXXXXXXXXXXXXXXXXXXX>--
 
 The device node can contain 'port' child nodes according to the bindings defined
 in [2]. The following are properties specific to those nodes:
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 17b22e9..d9cb972 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -608,6 +608,7 @@
 		clocks = <&clock CLK_SCLK_FIMD0>, <&clock CLK_FIMD0>;
 		clock-names = "sclk_fimd", "fimd";
 		samsung,power-domain = <&pd_lcd0>;
+		samsung,sysreg = <&sys_reg>;
 		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index 79d0608..fdead12 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -87,6 +87,7 @@
 		reg = <0x14400000 0x40000>;
 		interrupt-names = "fifo", "vsync", "lcd_sys";
 		interrupts = <18 4>, <18 5>, <18 6>;
+		samsung,sysreg = <&sysreg_system_controller>;
 		status = "disabled";
 	};
 
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 1595722..4300466 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -520,6 +520,26 @@
 		phy-names = "dp";
 	};
 
+	mipi_phy: video-phy@10040714 {
+		compatible = "samsung,s5pv210-mipi-video-phy";
+		reg = <0x10040714 12>;
+		#phy-cells = <1>;
+	};
+
+	dsi@14500000 {
+		compatible = "samsung,exynos5410-mipi-dsi";
+		reg = <0x14500000 0x10000>;
+		interrupts = <0 82 0>;
+		samsung,power-domain = <&disp_pd>;
+		phys = <&mipi_phy 1>;
+		phy-names = "dsim";
+		clocks = <&clock CLK_DSIM1>, <&clock CLK_SCLK_MIPI1>;
+		clock-names = "bus_clk", "pll_clk";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
 	fimd: fimd@14400000 {
 		samsung,power-domain = <&disp_pd>;
 		clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c477899..f748430 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -18,6 +18,15 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 
+struct component_match {
+	size_t alloc;
+	size_t num;
+	struct {
+		void *data;
+		int (*fn)(struct device *, void *);
+	} compare[0];
+};
+
 struct master {
 	struct list_head node;
 	struct list_head components;
@@ -25,6 +34,7 @@
 
 	const struct component_master_ops *ops;
 	struct device *dev;
+	struct component_match *match;
 };
 
 struct component {
@@ -69,6 +79,11 @@
 	c->master = NULL;
 }
 
+/*
+ * Add a component to a master, finding the component via the compare
+ * function and compare data.  This is safe to call for duplicate matches
+ * and will not result in the same component being added multiple times.
+ */
 int component_master_add_child(struct master *master,
 	int (*compare)(struct device *, void *), void *compare_data)
 {
@@ -76,11 +91,12 @@
 	int ret = -ENXIO;
 
 	list_for_each_entry(c, &component_list, node) {
-		if (c->master)
+		if (c->master && c->master != master)
 			continue;
 
 		if (compare(c->dev, compare_data)) {
-			component_attach_master(master, c);
+			if (!c->master)
+				component_attach_master(master, c);
 			ret = 0;
 			break;
 		}
@@ -90,6 +106,34 @@
 }
 EXPORT_SYMBOL_GPL(component_master_add_child);
 
+static int find_components(struct master *master)
+{
+	struct component_match *match = master->match;
+	size_t i;
+	int ret = 0;
+
+	if (!match) {
+		/*
+		 * Search the list of components, looking for components that
+		 * belong to this master, and attach them to the master.
+		 */
+		return master->ops->add_components(master->dev, master);
+	}
+
+	/*
+	 * Scan the array of match functions and attach
+	 * any components which are found to this master.
+	 */
+	for (i = 0; i < match->num; i++) {
+		ret = component_master_add_child(master,
+						 match->compare[i].fn,
+						 match->compare[i].data);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
 /* Detach all attached components from this master */
 static void master_remove_components(struct master *master)
 {
@@ -113,44 +157,44 @@
 static int try_to_bring_up_master(struct master *master,
 	struct component *component)
 {
-	int ret = 0;
+	int ret;
 
-	if (!master->bound) {
-		/*
-		 * Search the list of components, looking for components that
-		 * belong to this master, and attach them to the master.
-		 */
-		if (master->ops->add_components(master->dev, master)) {
-			/* Failed to find all components */
-			master_remove_components(master);
-			ret = 0;
-			goto out;
-		}
+	if (master->bound)
+		return 0;
 
-		if (component && component->master != master) {
-			master_remove_components(master);
-			ret = 0;
-			goto out;
-		}
-
-		if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		/* Found all components */
-		ret = master->ops->bind(master->dev);
-		if (ret < 0) {
-			devres_release_group(master->dev, NULL);
-			dev_info(master->dev, "master bind failed: %d\n", ret);
-			master_remove_components(master);
-			goto out;
-		}
-
-		master->bound = true;
-		ret = 1;
+	/*
+	 * Search the list of components, looking for components that
+	 * belong to this master, and attach them to the master.
+	 */
+	if (find_components(master)) {
+		/* Failed to find all components */
+		ret = 0;
+		goto out;
 	}
+
+	if (component && component->master != master) {
+		ret = 0;
+		goto out;
+	}
+
+	if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Found all components */
+	ret = master->ops->bind(master->dev);
+	if (ret < 0) {
+		devres_release_group(master->dev, NULL);
+		dev_info(master->dev, "master bind failed: %d\n", ret);
+		goto out;
+	}
+
+	master->bound = true;
+	return 1;
+
 out:
+	master_remove_components(master);
 
 	return ret;
 }
@@ -180,18 +224,89 @@
 	master_remove_components(master);
 }
 
-int component_master_add(struct device *dev,
-	const struct component_master_ops *ops)
+static size_t component_match_size(size_t num)
+{
+	return offsetof(struct component_match, compare[num]);
+}
+
+static struct component_match *component_match_realloc(struct device *dev,
+	struct component_match *match, size_t num)
+{
+	struct component_match *new;
+
+	if (match && match->alloc == num)
+		return match;
+
+	new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+
+	if (match) {
+		memcpy(new, match, component_match_size(min(match->num, num)));
+		devm_kfree(dev, match);
+	} else {
+		new->num = 0;
+	}
+
+	new->alloc = num;
+
+	return new;
+}
+
+/*
+ * Add a component to be matched.
+ *
+ * The match array is first created or extended if necessary.
+ */
+void component_match_add(struct device *dev, struct component_match **matchptr,
+	int (*compare)(struct device *, void *), void *compare_data)
+{
+	struct component_match *match = *matchptr;
+
+	if (IS_ERR(match))
+		return;
+
+	if (!match || match->num == match->alloc) {
+		size_t new_size = match ? match->alloc + 16 : 15;
+
+		match = component_match_realloc(dev, match, new_size);
+
+		*matchptr = match;
+
+		if (IS_ERR(match))
+			return;
+	}
+
+	match->compare[match->num].fn = compare;
+	match->compare[match->num].data = compare_data;
+	match->num++;
+}
+EXPORT_SYMBOL(component_match_add);
+
+int component_master_add_with_match(struct device *dev,
+	const struct component_master_ops *ops,
+	struct component_match *match)
 {
 	struct master *master;
 	int ret;
 
+	if (ops->add_components && match)
+		return -EINVAL;
+
+	if (match) {
+		/* Reallocate the match array for its true size */
+		match = component_match_realloc(dev, match, match->num);
+		if (IS_ERR(match))
+			return PTR_ERR(match);
+	}
+
 	master = kzalloc(sizeof(*master), GFP_KERNEL);
 	if (!master)
 		return -ENOMEM;
 
 	master->dev = dev;
 	master->ops = ops;
+	master->match = match;
 	INIT_LIST_HEAD(&master->components);
 
 	/* Add to the list of available masters. */
@@ -209,6 +324,13 @@
 
 	return ret < 0 ? ret : 0;
 }
+EXPORT_SYMBOL_GPL(component_master_add_with_match);
+
+int component_master_add(struct device *dev,
+	const struct component_master_ops *ops)
+{
+	return component_master_add_with_match(dev, ops, NULL);
+}
 EXPORT_SYMBOL_GPL(component_master_add);
 
 void component_master_del(struct device *dev,
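
The new match-based interface above replaces the add_components callback with
a match array built at probe time. A minimal sketch of a hypothetical master
driver using it (the my_* names are invented; the Armada conversion later in
this diff follows the same pattern):

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical compare callback: match a registered component by of_node. */
static int my_compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int my_master_bind(struct device *dev)
{
	/* Every match entry found a component: bind them and bring up
	 * the composite (e.g. DRM) device here. */
	return component_bind_all(dev, NULL);
}

static void my_master_unbind(struct device *dev)
{
	component_unbind_all(dev, NULL);
}

static const struct component_master_ops my_master_ops = {
	.bind	= my_master_bind,
	.unbind	= my_master_unbind,
};

static int my_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *child;

	/* Queue one match entry per expected component; the match
	 * array is allocated and grown by component_match_add(). */
	for_each_available_child_of_node(pdev->dev.of_node, child)
		component_match_add(&pdev->dev, &match, my_compare_of, child);

	/* Hand the array to the core: ->bind() runs once every entry
	 * has been satisfied by a component_add()'ed device. */
	return component_master_add_with_match(&pdev->dev, &my_master_ops,
					       match);
}

Note that component_master_add() is retained as a wrapper passing a NULL
match, so existing add_components users are unaffected.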
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index b297033..09f17eb 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -710,19 +710,6 @@
 	return 0;
 }
 
-
-static ssize_t agp_read(struct file *file, char __user *buf,
-			size_t count, loff_t * ppos)
-{
-	return -EINVAL;
-}
-
-static ssize_t agp_write(struct file *file, const char __user *buf,
-			 size_t count, loff_t * ppos)
-{
-	return -EINVAL;
-}
-
 static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
 {
 	struct agp_info userinfo;
@@ -1047,8 +1034,6 @@
 {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
-	.read		= agp_read,
-	.write		= agp_write,
 	.unlocked_ioctl	= agp_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= compat_agp_ioctl,
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index dd2ba42..61d9e9c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -20,11 +20,12 @@
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
+drm-$(CONFIG_OF) += drm_of.o
 
 drm-usb-y   := drm_usb.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
-		drm_plane_helper.o
+		drm_plane_helper.o drm_dp_mst_topology.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 59948ef..ad3d2eb 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -15,20 +15,19 @@
 #include "armada_drm.h"
 #include "armada_hw.h"
 
-static int armada510_init(struct armada_private *priv, struct device *dev)
+static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
 {
-	priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+	struct clk *clk;
 
-	if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
-		priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+	clk = devm_clk_get(dev, "ext_ref_clk1");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER : PTR_ERR(clk);
 
-	return PTR_RET(priv->extclk[0]);
-}
+	dcrtc->extclk[0] = clk;
 
-static int armada510_crtc_init(struct armada_crtc *dcrtc)
-{
 	/* Lower the watermark so to eliminate jitter at higher bandwidths */
 	armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+
 	return 0;
 }
 
@@ -45,8 +44,7 @@
 static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
 	const struct drm_display_mode *mode, uint32_t *sclk)
 {
-	struct armada_private *priv = dcrtc->crtc.dev->dev_private;
-	struct clk *clk = priv->extclk[0];
+	struct clk *clk = dcrtc->extclk[0];
 	int ret;
 
 	if (dcrtc->num == 1)
@@ -81,7 +79,6 @@
 const struct armada_variant armada510_ops = {
 	.has_spu_adv_reg = true,
 	.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
-	.init = armada510_init,
-	.crtc_init = armada510_crtc_init,
-	.crtc_compute_clock = armada510_crtc_compute_clock,
+	.init = armada510_crtc_init,
+	.compute_clock = armada510_crtc_compute_clock,
 };
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 81c34f9..3f620e2 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,6 +7,9 @@
  * published by the Free Software Foundation.
  */
 #include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include "armada_crtc.h"
@@ -332,24 +335,23 @@
 static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
 	const struct drm_display_mode *mode, struct drm_display_mode *adj)
 {
-	struct armada_private *priv = crtc->dev->dev_private;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 	int ret;
 
 	/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
-	if (!priv->variant->has_spu_adv_reg &&
+	if (!dcrtc->variant->has_spu_adv_reg &&
 	    adj->flags & DRM_MODE_FLAG_INTERLACE)
 		return false;
 
 	/* Check whether the display mode is possible */
-	ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+	ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL);
 	if (ret)
 		return false;
 
 	return true;
 }
 
-void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
 {
 	struct armada_vbl_event *e, *n;
 	void __iomem *base = dcrtc->base;
@@ -410,6 +412,27 @@
 	}
 }
 
+static irqreturn_t armada_drm_irq(int irq, void *arg)
+{
+	struct armada_crtc *dcrtc = arg;
+	u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+
+	/*
+	 * This is ridiculous - rather than writing bits to clear, we
+	 * have to set the actual status register value.  This is racy.
+	 */
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+	/* Mask out those interrupts we haven't enabled */
+	v = stat & dcrtc->irq_ena;
+
+	if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+		armada_drm_crtc_irq(dcrtc, stat);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
 /* These are locked by dev->vbl_lock */
 void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
 {
@@ -470,7 +493,6 @@
 	struct drm_display_mode *mode, struct drm_display_mode *adj,
 	int x, int y, struct drm_framebuffer *old_fb)
 {
-	struct armada_private *priv = crtc->dev->dev_private;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 	struct armada_regs regs[17];
 	uint32_t lm, rm, tm, bm, val, sclk;
@@ -515,7 +537,7 @@
 	}
 
 	/* Now compute the divider for real */
-	priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+	dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
 
 	/* Ensure graphic fifo is enabled */
 	armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
@@ -537,7 +559,7 @@
 	dcrtc->v[1].spu_v_porch = tm << 16 | bm;
 	val = adj->crtc_hsync_start;
 	dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
-		priv->variant->spu_adv_reg;
+		dcrtc->variant->spu_adv_reg;
 
 	if (interlaced) {
 		/* Odd interlaced frame */
@@ -546,7 +568,7 @@
 		dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
 		val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
 		dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
-			priv->variant->spu_adv_reg;
+			dcrtc->variant->spu_adv_reg;
 	} else {
 		dcrtc->v[0] = dcrtc->v[1];
 	}
@@ -561,7 +583,7 @@
 	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
 			   LCD_SPUT_V_H_TOTAL);
 
-	if (priv->variant->has_spu_adv_reg) {
+	if (dcrtc->variant->has_spu_adv_reg) {
 		armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
 				     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
 				     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
@@ -805,12 +827,11 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-	struct armada_private *priv = crtc->dev->dev_private;
 	struct armada_gem_object *obj = NULL;
 	int ret;
 
 	/* If no cursor support, replicate drm's return value */
-	if (!priv->variant->has_spu_adv_reg)
+	if (!dcrtc->variant->has_spu_adv_reg)
 		return -ENXIO;
 
 	if (handle && w > 0 && h > 0) {
@@ -858,11 +879,10 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-	struct armada_private *priv = crtc->dev->dev_private;
 	int ret;
 
 	/* If no cursor support, replicate drm's return value */
-	if (!priv->variant->has_spu_adv_reg)
+	if (!dcrtc->variant->has_spu_adv_reg)
 		return -EFAULT;
 
 	mutex_lock(&dev->struct_mutex);
@@ -888,6 +908,10 @@
 	if (!IS_ERR(dcrtc->clk))
 		clk_disable_unprepare(dcrtc->clk);
 
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+
+	of_node_put(dcrtc->crtc.port);
+
 	kfree(dcrtc);
 }
 
@@ -1027,19 +1051,20 @@
 	return 0;
 }
 
-int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
-	struct resource *res)
+int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
+	struct resource *res, int irq, const struct armada_variant *variant,
+	struct device_node *port)
 {
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm->dev_private;
 	struct armada_crtc *dcrtc;
 	void __iomem *base;
 	int ret;
 
-	ret = armada_drm_crtc_create_properties(dev);
+	ret = armada_drm_crtc_create_properties(drm);
 	if (ret)
 		return ret;
 
-	base = devm_request_and_ioremap(dev->dev, res);
+	base = devm_request_and_ioremap(dev, res);
 	if (!base) {
 		DRM_ERROR("failed to ioremap register\n");
 		return -ENOMEM;
@@ -1051,8 +1076,12 @@
 		return -ENOMEM;
 	}
 
+	if (dev != drm->dev)
+		dev_set_drvdata(dev, dcrtc);
+
+	dcrtc->variant = variant;
 	dcrtc->base = base;
-	dcrtc->num = num;
+	dcrtc->num = drm->mode_config.num_crtc;
 	dcrtc->clk = ERR_PTR(-EINVAL);
 	dcrtc->csc_yuv_mode = CSC_AUTO;
 	dcrtc->csc_rgb_mode = CSC_AUTO;
@@ -1074,9 +1103,18 @@
 		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
 	writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
 	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
-	if (priv->variant->crtc_init) {
-		ret = priv->variant->crtc_init(dcrtc);
+	ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
+			       dcrtc);
+	if (ret < 0) {
+		kfree(dcrtc);
+		return ret;
+	}
+
+	if (dcrtc->variant->init) {
+		ret = dcrtc->variant->init(dcrtc, dev);
 		if (ret) {
 			kfree(dcrtc);
 			return ret;
@@ -1088,7 +1126,8 @@
 
 	priv->dcrtc[dcrtc->num] = dcrtc;
 
-	drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+	dcrtc->crtc.port = port;
+	drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs);
 	drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
 
 	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
@@ -1096,5 +1135,107 @@
 	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
 				   dcrtc->csc_rgb_mode);
 
-	return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+	return armada_overlay_plane_create(drm, 1 << dcrtc->num);
 }
+
+static int
+armada_lcd_bind(struct device *dev, struct device *master, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm = data;
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	int irq = platform_get_irq(pdev, 0);
+	const struct armada_variant *variant;
+	struct device_node *port = NULL;
+
+	if (irq < 0)
+		return irq;
+
+	if (!dev->of_node) {
+		const struct platform_device_id *id;
+
+		id = platform_get_device_id(pdev);
+		if (!id)
+			return -ENXIO;
+
+		variant = (const struct armada_variant *)id->driver_data;
+	} else {
+		const struct of_device_id *match;
+		struct device_node *np, *parent = dev->of_node;
+
+		match = of_match_device(dev->driver->of_match_table, dev);
+		if (!match)
+			return -ENXIO;
+
+		np = of_get_child_by_name(parent, "ports");
+		if (np)
+			parent = np;
+		port = of_get_child_by_name(parent, "port");
+		of_node_put(np);
+		if (!port) {
+			dev_err(dev, "no port node found in %s\n",
+				parent->full_name);
+			return -ENXIO;
+		}
+
+		variant = match->data;
+	}
+
+	return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
+}
+
+static void
+armada_lcd_unbind(struct device *dev, struct device *master, void *data)
+{
+	struct armada_crtc *dcrtc = dev_get_drvdata(dev);
+
+	armada_drm_crtc_destroy(&dcrtc->crtc);
+}
+
+static const struct component_ops armada_lcd_ops = {
+	.bind = armada_lcd_bind,
+	.unbind = armada_lcd_unbind,
+};
+
+static int armada_lcd_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &armada_lcd_ops);
+}
+
+static int armada_lcd_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &armada_lcd_ops);
+	return 0;
+}
+
+static const struct of_device_id armada_lcd_of_match[] = {
+	{
+		.compatible	= "marvell,dove-lcd",
+		.data		= &armada510_ops,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, armada_lcd_of_match);
+
+static const struct platform_device_id armada_lcd_platform_ids[] = {
+	{
+		.name		= "armada-lcd",
+		.driver_data	= (unsigned long)&armada510_ops,
+	}, {
+		.name		= "armada-510-lcd",
+		.driver_data	= (unsigned long)&armada510_ops,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
+
+struct platform_driver armada_lcd_platform_driver = {
+	.probe	= armada_lcd_probe,
+	.remove	= armada_lcd_remove,
+	.driver = {
+		.name	= "armada-lcd",
+		.owner	= THIS_MODULE,
+		.of_match_table = armada_lcd_of_match,
+	},
+	.id_table = armada_lcd_platform_ids,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 9c10a07..98102a5 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,12 +32,15 @@
 	armada_reg_queue_mod(_r, _i, 0, 0, ~0)
 
 struct armada_frame_work;
+struct armada_variant;
 
 struct armada_crtc {
 	struct drm_crtc		crtc;
+	const struct armada_variant *variant;
 	unsigned		num;
 	void __iomem		*base;
 	struct clk		*clk;
+	struct clk		*extclk[2];
 	struct {
 		uint32_t	spu_v_h_total;
 		uint32_t	spu_v_porch;
@@ -72,12 +75,16 @@
 };
 #define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
 
-int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+struct device_node;
+int armada_drm_crtc_create(struct drm_device *, struct device *,
+	struct resource *, int, const struct armada_variant *,
+	struct device_node *);
 void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
 void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
-void armada_drm_crtc_irq(struct armada_crtc *, u32);
 void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
 void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
 void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
 
+extern struct platform_driver armada_lcd_platform_driver;
+
 #endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index a72cae0..ea63c6c 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -59,26 +59,23 @@
 struct armada_private;
 
 struct armada_variant {
-	bool	has_spu_adv_reg;
+	bool has_spu_adv_reg;
 	uint32_t spu_adv_reg;
-	int (*init)(struct armada_private *, struct device *);
-	int (*crtc_init)(struct armada_crtc *);
-	int (*crtc_compute_clock)(struct armada_crtc *,
-				  const struct drm_display_mode *,
-				  uint32_t *);
+	int (*init)(struct armada_crtc *, struct device *);
+	int (*compute_clock)(struct armada_crtc *,
+			     const struct drm_display_mode *,
+			     uint32_t *);
 };
 
 /* Variant ops */
 extern const struct armada_variant armada510_ops;
 
 struct armada_private {
-	const struct armada_variant *variant;
 	struct work_struct	fb_unref_work;
 	DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
 	struct drm_fb_helper	*fbdev;
 	struct armada_crtc	*dcrtc[2];
 	struct drm_mm		linear;
-	struct clk		*extclk[2];
 	struct drm_property	*csc_yuv_prop;
 	struct drm_property	*csc_rgb_prop;
 	struct drm_property	*colorkey_prop;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 8ab3cd1..e2d5792 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -6,7 +6,9 @@
  * published by the Free Software Foundation.
  */
 #include <linux/clk.h>
+#include <linux/component.h>
 #include <linux/module.h>
+#include <linux/of_graph.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include "armada_crtc.h"
@@ -52,6 +54,11 @@
 };
 #endif
 
+static bool is_componentized(struct device *dev)
+{
+	return dev->of_node || dev->platform_data;
+}
+
 static void armada_drm_unref_work(struct work_struct *work)
 {
 	struct armada_private *priv =
@@ -85,6 +92,7 @@
 static int armada_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	const struct platform_device_id *id;
+	const struct armada_variant *variant;
 	struct armada_private *priv;
 	struct resource *res[ARRAY_SIZE(priv->dcrtc)];
 	struct resource *mem = NULL;
@@ -107,7 +115,7 @@
 			return -EINVAL;
 	}
 
-	if (!res[0] || !mem)
+	if (!mem)
 		return -ENXIO;
 
 	if (!devm_request_mem_region(dev->dev, mem->start,
@@ -128,11 +136,7 @@
 	if (!id)
 		return -ENXIO;
 
-	priv->variant = (struct armada_variant *)id->driver_data;
-
-	ret = priv->variant->init(priv, dev->dev);
-	if (ret)
-		return ret;
+	variant = (const struct armada_variant *)id->driver_data;
 
 	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
 	INIT_KFIFO(priv->fb_unref);
@@ -155,40 +159,50 @@
 
 	/* Create all LCD controllers */
 	for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+		int irq;
+
 		if (!res[n])
 			break;
 
-		ret = armada_drm_crtc_create(dev, n, res[n]);
+		irq = platform_get_irq(dev->platformdev, n);
+		if (irq < 0)
+			goto err_kms;
+
+		ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
+					     variant, NULL);
 		if (ret)
 			goto err_kms;
 	}
 
+	if (is_componentized(dev->dev)) {
+		ret = component_bind_all(dev->dev, dev);
+		if (ret)
+			goto err_kms;
+	} else {
 #ifdef CONFIG_DRM_ARMADA_TDA1998X
-	ret = armada_drm_connector_slave_create(dev, &tda19988_config);
-	if (ret)
-		goto err_kms;
+		ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+		if (ret)
+			goto err_kms;
 #endif
+	}
 
-	ret = drm_vblank_init(dev, n);
+	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
 	if (ret)
-		goto err_kms;
-
-	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
-	if (ret)
-		goto err_kms;
+		goto err_comp;
 
 	dev->vblank_disable_allowed = 1;
 
 	ret = armada_fbdev_init(dev);
 	if (ret)
-		goto err_irq;
+		goto err_comp;
 
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
 
- err_irq:
-	drm_irq_uninstall(dev);
+ err_comp:
+	if (is_componentized(dev->dev))
+		component_unbind_all(dev->dev, dev);
  err_kms:
 	drm_mode_config_cleanup(dev);
 	drm_mm_takedown(&priv->linear);
@@ -203,7 +217,10 @@
 
 	drm_kms_helper_poll_fini(dev);
 	armada_fbdev_fini(dev);
-	drm_irq_uninstall(dev);
+
+	if (is_componentized(dev->dev))
+		component_unbind_all(dev->dev, dev);
+
 	drm_mode_config_cleanup(dev);
 	drm_mm_takedown(&priv->linear);
 	flush_work(&priv->fb_unref_work);
@@ -259,52 +276,6 @@
 	armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
 }
 
-static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
-{
-	struct drm_device *dev = arg;
-	struct armada_private *priv = dev->dev_private;
-	struct armada_crtc *dcrtc = priv->dcrtc[0];
-	uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
-	irqreturn_t handled = IRQ_NONE;
-
-	/*
-	 * This is rediculous - rather than writing bits to clear, we
-	 * have to set the actual status register value.  This is racy.
-	 */
-	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
-
-	/* Mask out those interrupts we haven't enabled */
-	v = stat & dcrtc->irq_ena;
-
-	if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
-		armada_drm_crtc_irq(dcrtc, stat);
-		handled = IRQ_HANDLED;
-	}
-
-	return handled;
-}
-
-static int armada_drm_irq_postinstall(struct drm_device *dev)
-{
-	struct armada_private *priv = dev->dev_private;
-	struct armada_crtc *dcrtc = priv->dcrtc[0];
-
-	spin_lock_irq(&dev->vbl_lock);
-	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
-	writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
-	spin_unlock_irq(&dev->vbl_lock);
-
-	return 0;
-}
-
-static void armada_drm_irq_uninstall(struct drm_device *dev)
-{
-	struct armada_private *priv = dev->dev_private;
-	struct armada_crtc *dcrtc = priv->dcrtc[0];
-
-	writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
-}
-
 static struct drm_ioctl_desc armada_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
 		DRM_UNLOCKED),
@@ -340,9 +311,6 @@
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= armada_drm_enable_vblank,
 	.disable_vblank		= armada_drm_disable_vblank,
-	.irq_handler		= armada_drm_irq_handler,
-	.irq_postinstall	= armada_drm_irq_postinstall,
-	.irq_uninstall		= armada_drm_irq_uninstall,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init		= armada_drm_debugfs_init,
 	.debugfs_cleanup	= armada_drm_debugfs_cleanup,
@@ -362,19 +330,140 @@
 	.desc			= "Armada SoC DRM",
 	.date			= "20120730",
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
-				  DRIVER_HAVE_IRQ | DRIVER_PRIME,
+				  DRIVER_PRIME,
 	.ioctls			= armada_ioctls,
 	.fops			= &armada_drm_fops,
 };
 
+static int armada_drm_bind(struct device *dev)
+{
+	return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+}
+
+static void armada_drm_unbind(struct device *dev)
+{
+	drm_put_dev(dev_get_drvdata(dev));
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int compare_dev_name(struct device *dev, void *data)
+{
+	const char *name = data;
+	return !strcmp(dev_name(dev), name);
+}
+
+static void armada_add_endpoints(struct device *dev,
+	struct component_match **match, struct device_node *port)
+{
+	struct device_node *ep, *remote;
+
+	for_each_child_of_node(port, ep) {
+		remote = of_graph_get_remote_port_parent(ep);
+		if (!remote || !of_device_is_available(remote)) {
+			of_node_put(remote);
+			continue;
+		} else if (!of_device_is_available(remote->parent)) {
+			dev_warn(dev, "parent device of %s is not available\n",
+				 remote->full_name);
+			of_node_put(remote);
+			continue;
+		}
+
+		component_match_add(dev, match, compare_of, remote);
+		of_node_put(remote);
+	}
+}
+
+static int armada_drm_find_components(struct device *dev,
+	struct component_match **match)
+{
+	struct device_node *port;
+	int i;
+
+	if (dev->of_node) {
+		struct device_node *np = dev->of_node;
+
+		for (i = 0; ; i++) {
+			port = of_parse_phandle(np, "ports", i);
+			if (!port)
+				break;
+
+			component_match_add(dev, match, compare_of, port);
+			of_node_put(port);
+		}
+
+		if (i == 0) {
+			dev_err(dev, "missing 'ports' property\n");
+			return -ENODEV;
+		}
+
+		for (i = 0; ; i++) {
+			port = of_parse_phandle(np, "ports", i);
+			if (!port)
+				break;
+
+			armada_add_endpoints(dev, match, port);
+			of_node_put(port);
+		}
+	} else if (dev->platform_data) {
+		char **devices = dev->platform_data;
+		struct device *d;
+
+		for (i = 0; devices[i]; i++)
+			component_match_add(dev, match, compare_dev_name,
+					    devices[i]);
+
+		if (i == 0) {
+			dev_err(dev, "missing 'ports' property\n");
+			return -ENODEV;
+		}
+
+		for (i = 0; devices[i]; i++) {
+			d = bus_find_device_by_name(&platform_bus_type, NULL,
+					devices[i]);
+			if (d && d->of_node) {
+				for_each_child_of_node(d->of_node, port)
+					armada_add_endpoints(dev, match, port);
+			}
+			put_device(d);
+		}
+	}
+
+	return 0;
+}
+
+static const struct component_master_ops armada_master_ops = {
+	.bind = armada_drm_bind,
+	.unbind = armada_drm_unbind,
+};
+
 static int armada_drm_probe(struct platform_device *pdev)
 {
-	return drm_platform_init(&armada_drm_driver, pdev);
+	if (is_componentized(&pdev->dev)) {
+		struct component_match *match = NULL;
+		int ret;
+
+		ret = armada_drm_find_components(&pdev->dev, &match);
+		if (ret < 0)
+			return ret;
+
+		return component_master_add_with_match(&pdev->dev,
+				&armada_master_ops, match);
+	} else {
+		return drm_platform_init(&armada_drm_driver, pdev);
+	}
 }
 
 static int armada_drm_remove(struct platform_device *pdev)
 {
-	drm_put_dev(platform_get_drvdata(pdev));
+	if (is_componentized(&pdev->dev))
+		component_master_del(&pdev->dev, &armada_master_ops);
+	else
+		drm_put_dev(platform_get_drvdata(pdev));
 	return 0;
 }
 
@@ -402,14 +491,24 @@
 
 static int __init armada_drm_init(void)
 {
+	int ret;
+
 	armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
-	return platform_driver_register(&armada_drm_platform_driver);
+
+	ret = platform_driver_register(&armada_lcd_platform_driver);
+	if (ret)
+		return ret;
+	ret = platform_driver_register(&armada_drm_platform_driver);
+	if (ret)
+		platform_driver_unregister(&armada_lcd_platform_driver);
+	return ret;
 }
 module_init(armada_drm_init);
 
 static void __exit armada_drm_exit(void)
 {
 	platform_driver_unregister(&armada_drm_platform_driver);
+	platform_driver_unregister(&armada_lcd_platform_driver);
 }
 module_exit(armada_drm_exit);
 
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index fd166f5..7838e73 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -131,7 +131,7 @@
 	return ret;
 }
 
-static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
 	.gamma_set	= armada_drm_crtc_gamma_set,
 	.gamma_get	= armada_drm_crtc_gamma_get,
 	.fb_probe	= armada_fb_probe,
@@ -149,7 +149,7 @@
 
 	priv->fbdev = fbh;
 
-	fbh->funcs = &armada_fb_helper_funcs;
+	drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(dev, fbh, 1, 1);
 	if (ret) {
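
Several fbdev hunks in this merge (armada above; ast, bochs and cirrus below)
replace the open-coded `fbh->funcs = &...` assignment with
drm_fb_helper_prepare(), which also lets the funcs table become const. A
condensed sketch of the resulting init sequence (hypothetical my_* names;
error handling of the last two calls omitted):

#include <drm/drm_fb_helper.h>

static const struct drm_fb_helper_funcs my_fb_helper_funcs = {
	/* .gamma_set, .gamma_get and .fb_probe as before */
};

static int my_fbdev_init(struct drm_device *dev, struct drm_fb_helper *fbh)
{
	int ret;

	/* New step: attach the const funcs table and initialize the
	 * helper fields before drm_fb_helper_init(). */
	drm_fb_helper_prepare(dev, fbh, &my_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, fbh, 1, 1);
	if (ret)
		return ret;

	drm_fb_helper_single_add_all_connectors(fbh);
	drm_fb_helper_initial_config(fbh, 32);

	return 0;
}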
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
index d685a54..abbc309 100644
--- a/drivers/gpu/drm/armada/armada_output.c
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -48,7 +48,7 @@
 {
 	struct armada_connector *dconn = drm_to_armada_conn(conn);
 
-	drm_sysfs_connector_remove(conn);
+	drm_connector_unregister(conn);
 	drm_connector_cleanup(conn);
 	kfree(dconn);
 }
@@ -141,7 +141,7 @@
 	if (ret)
 		goto err_conn;
 
-	ret = drm_sysfs_connector_add(&dconn->conn);
+	ret = drm_connector_register(&dconn->conn);
 	if (ret)
 		goto err_sysfs;
 
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5d6a875..957d4fa 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -362,7 +362,7 @@
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
 	if (ret) {
 		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index a28640f..cba45c7 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -287,7 +287,7 @@
 	*blue = ast_crtc->lut_b[regno] << 8;
 }
 
-static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
 	.gamma_set = ast_fb_gamma_set,
 	.gamma_get = ast_fb_gamma_get,
 	.fb_probe = astfb_create,
@@ -328,8 +328,10 @@
 		return -ENOMEM;
 
 	ast->fbdev = afbdev;
-	afbdev->helper.funcs = &ast_fb_helper_funcs;
 	spin_lock_init(&afbdev->dirty_lock);
+
+	drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
+
 	ret = drm_fb_helper_init(dev, &afbdev->helper,
 				 1, 1);
 	if (ret) {
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 114aee9..5389350 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -667,17 +667,9 @@
 static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
@@ -829,7 +821,7 @@
 {
 	struct ast_connector *ast_connector = to_ast_connector(connector);
 	ast_i2c_destroy(ast_connector->i2c);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -871,7 +863,7 @@
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 9c13df2..f5e0ead 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -97,6 +97,7 @@
 /* ---------------------------------------------------------------------- */
 /* pm interface                                                           */
 
+#ifdef CONFIG_PM_SLEEP
 static int bochs_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -131,6 +132,7 @@
 	drm_kms_helper_poll_enable(drm_dev);
 	return 0;
 }
+#endif
 
 static const struct dev_pm_ops bochs_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 561b844..fe95d31 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -72,7 +72,7 @@
 
 	bo = gem_to_bochs_bo(gobj);
 
-	ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -179,7 +179,7 @@
 	*blue  = regno;
 }
 
-static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
 	.gamma_set = bochs_fb_gamma_set,
 	.gamma_get = bochs_fb_gamma_get,
 	.fb_probe = bochsfb_create,
@@ -189,7 +189,8 @@
 {
 	int ret;
 
-	bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+	drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
+			      &bochs_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
 				 1, 1);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index dcf2e55..9d7346b 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -53,7 +53,7 @@
 	if (old_fb) {
 		bochs_fb = to_bochs_framebuffer(old_fb);
 		bo = gem_to_bochs_bo(bochs_fb->obj);
-		ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+		ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
 		if (ret) {
 			DRM_ERROR("failed to reserve old_fb bo\n");
 		} else {
@@ -67,7 +67,7 @@
 
 	bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
 	bo = gem_to_bochs_bo(bochs_fb->obj);
-	ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -216,18 +216,9 @@
 bochs_connector_best_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj = drm_mode_object_find(connector->dev, enc_id,
-					   DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index b9a695d..1728a1b 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -387,7 +387,7 @@
 
 	*obj = NULL;
 
-	size = ALIGN(size, PAGE_SIZE);
+	size = PAGE_ALIGN(size);
 	if (size == 0)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 98fd17a..d466696 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -328,7 +328,7 @@
 	}
 	drm_connector_helper_add(&ptn_bridge->connector,
 			&ptn3460_connector_helper_funcs);
-	drm_sysfs_connector_add(&ptn_bridge->connector);
+	drm_connector_register(&ptn_bridge->connector);
 	drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
 
 	return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 08ce520..4516b05 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -76,6 +76,7 @@
 	drm_put_dev(dev);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cirrus_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -110,6 +111,7 @@
 	drm_kms_helper_poll_enable(drm_dev);
 	return 0;
 }
+#endif
 
 static const struct file_operations cirrus_driver_fops = {
 	.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 117d3ec..401c890 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -241,7 +241,7 @@
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
 	if (ret) {
 		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32bbba0..2a135f2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -288,7 +288,7 @@
 	return 0;
 }
 
-static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
 	.gamma_set = cirrus_crtc_fb_gamma_set,
 	.gamma_get = cirrus_crtc_fb_gamma_get,
 	.fb_probe = cirrusfb_create,
@@ -306,9 +306,11 @@
 		return -ENOMEM;
 
 	cdev->mode_info.gfbdev = gfbdev;
-	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
 	spin_lock_init(&gfbdev->dirty_lock);
 
+	drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
+			      &cirrus_fb_helper_funcs);
+
 	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
 				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
 	if (ret) {
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 49332c5..e1c5c32 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -509,19 +509,9 @@
 						  *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj =
-		    drm_mode_object_find(connector->dev, enc_id,
-					 DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 0406110..86a4a4a 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -80,11 +80,7 @@
 
 error_out:
 
-	/* Only last element can be null pointer so check for it first. */
-	if ((*buf)->data[idx])
-		kfree((*buf)->data[idx]);
-
-	for (--idx; idx >= 0; --idx)
+	for (; idx >= 0; --idx)
 		kfree((*buf)->data[idx]);
 
 	kfree(*buf);
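
The error-path simplification above leans on the fact that kfree(NULL) is
defined to be a no-op, so the special case for the possibly-NULL last element
folds into the loop:

	/* Before: guard against the one element that may be NULL. */
	if (p)
		kfree(p);

	/* After: kfree(NULL) does nothing, so the guard is redundant. */
	kfree(p);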
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 68175b5..61acb8f 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1217,7 +1217,6 @@
 				struct drm_buf_desc __user *to =
 				    &request->list[count];
 				struct drm_buf_entry *from = &dma->bufs[i];
-				struct drm_freelist *list = &dma->bufs[i].freelist;
 				if (copy_to_user(&to->count,
 						 &from->buf_count,
 						 sizeof(from->buf_count)) ||
@@ -1225,19 +1224,19 @@
 						 &from->buf_size,
 						 sizeof(from->buf_size)) ||
 				    copy_to_user(&to->low_mark,
-						 &list->low_mark,
-						 sizeof(list->low_mark)) ||
+						 &from->low_mark,
+						 sizeof(from->low_mark)) ||
 				    copy_to_user(&to->high_mark,
-						 &list->high_mark,
-						 sizeof(list->high_mark)))
+						 &from->high_mark,
+						 sizeof(from->high_mark)))
 					return -EFAULT;
 
 				DRM_DEBUG("%d %d %d %d %d\n",
 					  i,
 					  dma->bufs[i].buf_count,
 					  dma->bufs[i].buf_size,
-					  dma->bufs[i].freelist.low_mark,
-					  dma->bufs[i].freelist.high_mark);
+					  dma->bufs[i].low_mark,
+					  dma->bufs[i].high_mark);
 				++count;
 			}
 		}
@@ -1290,8 +1289,8 @@
 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
 		return -EINVAL;
 
-	entry->freelist.low_mark = request->low_mark;
-	entry->freelist.high_mark = request->high_mark;
+	entry->low_mark = request->low_mark;
+	entry->high_mark = request->high_mark;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fe94cc1..3c4a621 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -41,6 +41,10 @@
 
 #include "drm_crtc_internal.h"
 
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+							struct drm_mode_fb_cmd2 *r,
+							struct drm_file *file_priv);
+
 /**
  * drm_modeset_lock_all - take all modeset locks
  * @dev: drm device
@@ -178,6 +182,12 @@
 	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
 };
 
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+	{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+	{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+	{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
 /*
  * Non-global properties, but "required" for certain connectors.
  */
@@ -357,6 +367,32 @@
 }
 EXPORT_SYMBOL(drm_get_format_name);
 
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+static int drm_mode_object_get_reg(struct drm_device *dev,
+				   struct drm_mode_object *obj,
+				   uint32_t obj_type,
+				   bool register_obj)
+{
+	int ret;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+	if (ret >= 0) {
+		/*
+		 * Set up the object linking under the protection of the idr
+		 * lock so that other users can't see inconsistent state.
+		 */
+		obj->id = ret;
+		obj->type = obj_type;
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return ret < 0 ? ret : 0;
+}
+
 /**
  * drm_mode_object_get - allocate a new modeset identifier
  * @dev: DRM device
@@ -375,21 +411,15 @@
 int drm_mode_object_get(struct drm_device *dev,
 			struct drm_mode_object *obj, uint32_t obj_type)
 {
-	int ret;
+	return drm_mode_object_get_reg(dev, obj, obj_type, true);
+}
 
+static void drm_mode_object_register(struct drm_device *dev,
+				     struct drm_mode_object *obj)
+{
 	mutex_lock(&dev->mode_config.idr_mutex);
-	ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
-	if (ret >= 0) {
-		/*
-		 * Set up the object linking under the protection of the idr
-		 * lock so that other users can't see inconsistent state.
-		 */
-		obj->id = ret;
-		obj->type = obj_type;
-	}
+	idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
 	mutex_unlock(&dev->mode_config.idr_mutex);
-
-	return ret < 0 ? ret : 0;
 }
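+
+/*
+ * Editor's note (illustrative, not part of the patch itself): the split
+ * above yields a reserve-then-publish pattern.  drm_mode_object_get_reg()
+ * with register_obj == false reserves an id but stores a NULL pointer in
+ * the idr, so lookups fail until drm_mode_object_register() publishes the
+ * object via idr_replace().  Connectors rely on this so that their id is
+ * invisible to userspace until drm_connector_register() is called.
+ */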
 
 /**
@@ -723,7 +753,7 @@
  */
 int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 			      struct drm_plane *primary,
-			      void *cursor,
+			      struct drm_plane *cursor,
 			      const struct drm_crtc_funcs *funcs)
 {
 	struct drm_mode_config *config = &dev->mode_config;
@@ -748,8 +778,11 @@
 	config->num_crtc++;
 
 	crtc->primary = primary;
+	crtc->cursor = cursor;
 	if (primary)
 		primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+	if (cursor)
+		cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
 
  out:
 	drm_modeset_unlock_all(dev);
@@ -842,7 +875,7 @@
 
 	drm_modeset_lock_all(dev);
 
-	ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+	ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
 	if (ret)
 		goto out_unlock;
 
@@ -881,6 +914,8 @@
 	drm_object_attach_property(&connector->base,
 				      dev->mode_config.dpms_property, 0);
 
+	connector->debugfs_entry = NULL;
+
 out_put:
 	if (ret)
 		drm_mode_object_put(dev, &connector->base);
@@ -921,6 +956,49 @@
 EXPORT_SYMBOL(drm_connector_cleanup);
 
 /**
+ * drm_connector_register - register a connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register(struct drm_connector *connector)
+{
+	int ret;
+
+	drm_mode_object_register(connector->dev, &connector->base);
+
+	ret = drm_sysfs_connector_add(connector);
+	if (ret)
+		return ret;
+
+	ret = drm_debugfs_connector_add(connector);
+	if (ret) {
+		drm_sysfs_connector_remove(connector);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_connector_register);
+
+/**
+ * drm_connector_unregister - unregister a connector
+ * @connector: the connector to unregister
+ *
+ * Unregister userspace interfaces for a connector.
+ */
+void drm_connector_unregister(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_debugfs_connector_remove(connector);
+}
+EXPORT_SYMBOL(drm_connector_unregister);
+
+/**
  * drm_connector_unplug_all - unregister connector userspace interfaces
  * @dev: drm device
  *
@@ -934,7 +1012,7 @@
 
 	/* taking the mode config mutex ends up in a clash with sysfs */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		drm_sysfs_connector_remove(connector);
+		drm_connector_unregister(connector);
 
 }
 EXPORT_SYMBOL(drm_connector_unplug_all);
@@ -1214,6 +1292,7 @@
 {
 	struct drm_property *edid;
 	struct drm_property *dpms;
+	struct drm_property *dev_path;
 
 	/*
 	 * Standard properties (apply to all connectors)
@@ -1228,6 +1307,12 @@
 				   ARRAY_SIZE(drm_dpms_enum_list));
 	dev->mode_config.dpms_property = dpms;
 
+	dev_path = drm_property_create(dev,
+				       DRM_MODE_PROP_BLOB |
+				       DRM_MODE_PROP_IMMUTABLE,
+				       "PATH", 0);
+	dev->mode_config.path_property = dev_path;
+
 	return 0;
 }
 
@@ -1384,6 +1469,33 @@
 EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
 /**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it is needed; the property must then be
+ * attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+	if (dev->mode_config.aspect_ratio_property)
+		return 0;
+
+	dev->mode_config.aspect_ratio_property =
+		drm_property_create_enum(dev, 0, "aspect ratio",
+				drm_aspect_ratio_enum_list,
+				ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+	if (dev->mode_config.aspect_ratio_property == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
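+
+/*
+ * A minimal usage sketch (editor's illustration; 'connector' stands for a
+ * driver's connector, and the NONE default value is an assumption):
+ *
+ *	if (!drm_mode_create_aspect_ratio_property(dev))
+ *		drm_object_attach_property(&connector->base,
+ *				dev->mode_config.aspect_ratio_property,
+ *				DRM_MODE_PICTURE_ASPECT_NONE);
+ */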
+
+/**
  * drm_mode_create_dirty_property - create dirty property
  * @dev: DRM device
  *
@@ -1470,6 +1582,15 @@
 }
 EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
+void drm_reinit_primary_mode_group(struct drm_device *dev)
+{
+	drm_modeset_lock_all(dev);
+	drm_mode_group_destroy(&dev->primary->mode_group);
+	drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+	drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_reinit_primary_mode_group);
+
 /**
  * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
  * @out: drm_mode_modeinfo struct to return to the user
@@ -2118,45 +2239,32 @@
 	return ret;
 }
 
-/**
- * drm_mode_setplane - configure a plane's configuration
- * @dev: DRM device
- * @data: ioctl data*
- * @file_priv: DRM file info
+/*
+ * setplane_internal - setplane handler for internal callers
  *
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable.
+ * Note that we assume an extra reference has already been taken on fb.  If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
  *
- * Returns:
- * Zero on success, errno on failure.
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
  */
-int drm_mode_setplane(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+static int setplane_internal(struct drm_plane *plane,
+			     struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     int32_t crtc_x, int32_t crtc_y,
+			     uint32_t crtc_w, uint32_t crtc_h,
+			     /* src_{x,y,w,h} values are 16.16 fixed point */
+			     uint32_t src_x, uint32_t src_y,
+			     uint32_t src_w, uint32_t src_h)
 {
-	struct drm_mode_set_plane *plane_req = data;
-	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+	struct drm_device *dev = plane->dev;
+	struct drm_framebuffer *old_fb = NULL;
 	int ret = 0;
 	unsigned int fb_width, fb_height;
 	int i;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	/*
-	 * First, find the plane, crtc, and fb objects.  If not available,
-	 * we don't bother to call the driver.
-	 */
-	plane = drm_plane_find(dev, plane_req->plane_id);
-	if (!plane) {
-		DRM_DEBUG_KMS("Unknown plane ID %d\n",
-			      plane_req->plane_id);
-		return -ENOENT;
-	}
-
 	/* No fb means shut it down */
-	if (!plane_req->fb_id) {
+	if (!fb) {
 		drm_modeset_lock_all(dev);
 		old_fb = plane->fb;
 		ret = plane->funcs->disable_plane(plane);
@@ -2170,14 +2278,6 @@
 		goto out;
 	}
 
-	crtc = drm_crtc_find(dev, plane_req->crtc_id);
-	if (!crtc) {
-		DRM_DEBUG_KMS("Unknown crtc ID %d\n",
-			      plane_req->crtc_id);
-		ret = -ENOENT;
-		goto out;
-	}
-
 	/* Check whether this plane is usable on this CRTC */
 	if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
 		DRM_DEBUG_KMS("Invalid crtc for plane\n");
@@ -2185,14 +2285,6 @@
 		goto out;
 	}
 
-	fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
-	if (!fb) {
-		DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
-			      plane_req->fb_id);
-		ret = -ENOENT;
-		goto out;
-	}
-
 	/* Check whether this plane supports the fb pixel format. */
 	for (i = 0; i < plane->format_count; i++)
 		if (fb->pixel_format == plane->format_types[i])
@@ -2208,43 +2300,25 @@
 	fb_height = fb->height << 16;
 
 	/* Make sure source coordinates are inside the fb. */
-	if (plane_req->src_w > fb_width ||
-	    plane_req->src_x > fb_width - plane_req->src_w ||
-	    plane_req->src_h > fb_height ||
-	    plane_req->src_y > fb_height - plane_req->src_h) {
+	if (src_w > fb_width ||
+	    src_x > fb_width - src_w ||
+	    src_h > fb_height ||
+	    src_y > fb_height - src_h) {
 		DRM_DEBUG_KMS("Invalid source coordinates "
 			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
-			      plane_req->src_w >> 16,
-			      ((plane_req->src_w & 0xffff) * 15625) >> 10,
-			      plane_req->src_h >> 16,
-			      ((plane_req->src_h & 0xffff) * 15625) >> 10,
-			      plane_req->src_x >> 16,
-			      ((plane_req->src_x & 0xffff) * 15625) >> 10,
-			      plane_req->src_y >> 16,
-			      ((plane_req->src_y & 0xffff) * 15625) >> 10);
+			      src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+			      src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+			      src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+			      src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
 		ret = -ENOSPC;
 		goto out;
 	}
 
-	/* Give drivers some help against integer overflows */
-	if (plane_req->crtc_w > INT_MAX ||
-	    plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
-	    plane_req->crtc_h > INT_MAX ||
-	    plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
-		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-			      plane_req->crtc_w, plane_req->crtc_h,
-			      plane_req->crtc_x, plane_req->crtc_y);
-		ret = -ERANGE;
-		goto out;
-	}
-
 	drm_modeset_lock_all(dev);
 	old_fb = plane->fb;
 	ret = plane->funcs->update_plane(plane, crtc, fb,
-					 plane_req->crtc_x, plane_req->crtc_y,
-					 plane_req->crtc_w, plane_req->crtc_h,
-					 plane_req->src_x, plane_req->src_y,
-					 plane_req->src_w, plane_req->src_h);
+					 crtc_x, crtc_y, crtc_w, crtc_h,
+					 src_x, src_y, src_w, src_h);
 	if (!ret) {
 		plane->crtc = crtc;
 		plane->fb = fb;
@@ -2261,6 +2335,85 @@
 		drm_framebuffer_unreference(old_fb);
 
 	return ret;
+}
+
+/**
+ * drm_mode_setplane - configure a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane configuration, including placement, fb, scaling, and other
+ * factors, or pass a NULL fb to disable the plane (planes may be disabled
+ * without providing a valid crtc).
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_mode_set_plane *plane_req = data;
+	struct drm_mode_object *obj;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc = NULL;
+	struct drm_framebuffer *fb = NULL;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	/* Give drivers some help against integer overflows */
+	if (plane_req->crtc_w > INT_MAX ||
+	    plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+	    plane_req->crtc_h > INT_MAX ||
+	    plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+			      plane_req->crtc_w, plane_req->crtc_h,
+			      plane_req->crtc_x, plane_req->crtc_y);
+		return -ERANGE;
+	}
+
+	/*
+	 * First, find the plane, crtc, and fb objects.  If not available,
+	 * we don't bother to call the driver.
+	 */
+	obj = drm_mode_object_find(dev, plane_req->plane_id,
+				   DRM_MODE_OBJECT_PLANE);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown plane ID %d\n",
+			      plane_req->plane_id);
+		return -ENOENT;
+	}
+	plane = obj_to_plane(obj);
+
+	if (plane_req->fb_id) {
+		fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+		if (!fb) {
+			DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+				      plane_req->fb_id);
+			return -ENOENT;
+		}
+
+		obj = drm_mode_object_find(dev, plane_req->crtc_id,
+					   DRM_MODE_OBJECT_CRTC);
+		if (!obj) {
+			DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+				      plane_req->crtc_id);
+			return -ENOENT;
+		}
+		crtc = obj_to_crtc(obj);
+	}
+
+	/*
+	 * setplane_internal will take care of deref'ing either the old or new
+	 * framebuffer depending on success.
+	 */
+	return setplane_internal(plane, crtc, fb,
+				 plane_req->crtc_x, plane_req->crtc_y,
+				 plane_req->crtc_w, plane_req->crtc_h,
+				 plane_req->src_x, plane_req->src_y,
+				 plane_req->src_w, plane_req->src_h);
 }
 
 /**
@@ -2509,6 +2662,102 @@
 	return ret;
 }
 
+/**
+ * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
+ *     universal plane handler call
+ * @crtc: crtc to update cursor for
+ * @req: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Legacy cursor ioctls work directly with driver buffer handles.  To
+ * translate legacy ioctl calls into universal plane handler calls, we need to
+ * wrap the native buffer handle in a drm_framebuffer.
+ *
+ * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
+ * buffer with a pitch of 4*width; the universal plane interface should be used
+ * directly in cases where the hardware can support other buffer settings and
+ * userspace wants to make use of these capabilities.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+				     struct drm_mode_cursor2 *req,
+				     struct drm_file *file_priv)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd2 fbreq = {
+		.width = req->width,
+		.height = req->height,
+		.pixel_format = DRM_FORMAT_ARGB8888,
+		.pitches = { req->width * 4 },
+		.handles = { req->handle },
+	};
+	int32_t crtc_x, crtc_y;
+	uint32_t crtc_w = 0, crtc_h = 0;
+	uint32_t src_w = 0, src_h = 0;
+	int ret = 0;
+
+	BUG_ON(!crtc->cursor);
+
+	/*
+	 * Obtain fb we'll be using (either new or existing) and take an extra
+	 * reference to it if fb != NULL.  setplane_internal will take care of
+	 * dropping the reference if the plane update fails.
+	 */
+	if (req->flags & DRM_MODE_CURSOR_BO) {
+		if (req->handle) {
+			fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+			if (IS_ERR(fb)) {
+				DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+				return PTR_ERR(fb);
+			}
+
+			drm_framebuffer_reference(fb);
+		} else {
+			fb = NULL;
+		}
+	} else {
+		mutex_lock(&dev->mode_config.mutex);
+		fb = crtc->cursor->fb;
+		if (fb)
+			drm_framebuffer_reference(fb);
+		mutex_unlock(&dev->mode_config.mutex);
+	}
+
+	if (req->flags & DRM_MODE_CURSOR_MOVE) {
+		crtc_x = req->x;
+		crtc_y = req->y;
+	} else {
+		crtc_x = crtc->cursor_x;
+		crtc_y = crtc->cursor_y;
+	}
+
+	if (fb) {
+		crtc_w = fb->width;
+		crtc_h = fb->height;
+		src_w = fb->width << 16;
+		src_h = fb->height << 16;
+	}
+
+	/*
+	 * setplane_internal will take care of deref'ing either the old or new
+	 * framebuffer depending on success.
+	 */
+	ret = setplane_internal(crtc->cursor, crtc, fb,
+				crtc_x, crtc_y, crtc_w, crtc_h,
+				0, 0, src_w, src_h);
+
+	/* Update successful; save new cursor position, if necessary */
+	if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+		crtc->cursor_x = req->x;
+		crtc->cursor_y = req->y;
+	}
+
+	return ret;
+}
+
 static int drm_mode_cursor_common(struct drm_device *dev,
 				  struct drm_mode_cursor2 *req,
 				  struct drm_file *file_priv)
@@ -2528,6 +2777,13 @@
 		return -ENOENT;
 	}
 
+	/*
+	 * If this crtc has a universal cursor plane, call that plane's update
+	 * handler rather than using legacy cursor handlers.
+	 */
+	if (crtc->cursor)
+		return drm_mode_cursor_universal(crtc, req, file_priv);
+
 	drm_modeset_lock(&crtc->mutex, NULL);
 	if (req->flags & DRM_MODE_CURSOR_BO) {
 		if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
@@ -2827,6 +3083,49 @@
 	return 0;
 }
 
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+							struct drm_mode_fb_cmd2 *r,
+							struct drm_file *file_priv)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if ((config->min_width > r->width) || (r->width > config->max_width)) {
+		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+			  r->width, config->min_width, config->max_width);
+		return ERR_PTR(-EINVAL);
+	}
+	if ((config->min_height > r->height) || (r->height > config->max_height)) {
+		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+			  r->height, config->min_height, config->max_height);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = framebuffer_check(r);
+	if (ret)
+		return ERR_PTR(ret);
+
+	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+	if (IS_ERR(fb)) {
+		DRM_DEBUG_KMS("could not create framebuffer\n");
+		return fb;
+	}
+
+	mutex_lock(&file_priv->fbs_lock);
+	r->fb_id = fb->base.id;
+	list_add(&fb->filp_head, &file_priv->fbs);
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	return fb;
+}
+
 /**
  * drm_mode_addfb2 - add an FB to the graphics configuration
  * @dev: drm device for the ioctl
@@ -2845,48 +3144,16 @@
 int drm_mode_addfb2(struct drm_device *dev,
 		    void *data, struct drm_file *file_priv)
 {
-	struct drm_mode_fb_cmd2 *r = data;
-	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_framebuffer *fb;
-	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	if (r->flags & ~DRM_MODE_FB_INTERLACED) {
-		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
-		return -EINVAL;
-	}
-
-	if ((config->min_width > r->width) || (r->width > config->max_width)) {
-		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
-			  r->width, config->min_width, config->max_width);
-		return -EINVAL;
-	}
-	if ((config->min_height > r->height) || (r->height > config->max_height)) {
-		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
-			  r->height, config->min_height, config->max_height);
-		return -EINVAL;
-	}
-
-	ret = framebuffer_check(r);
-	if (ret)
-		return ret;
-
-	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
-	if (IS_ERR(fb)) {
-		DRM_DEBUG_KMS("could not create framebuffer\n");
+	fb = add_framebuffer_internal(dev, data, file_priv);
+	if (IS_ERR(fb))
 		return PTR_ERR(fb);
-	}
 
-	mutex_lock(&file_priv->fbs_lock);
-	r->fb_id = fb->base.id;
-	list_add(&fb->filp_head, &file_priv->fbs);
-	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-	mutex_unlock(&file_priv->fbs_lock);
-
-
-	return ret;
+	return 0;
 }
 
 /**
@@ -3176,7 +3443,7 @@
 EXPORT_SYMBOL(drm_property_create);
 
 /**
- * drm_property_create - create a new enumeration property type
+ * drm_property_create_enum - create a new enumeration property type
  * @dev: drm device
  * @flags: flags specifying the property type
  * @name: name of the property
@@ -3222,7 +3489,7 @@
 EXPORT_SYMBOL(drm_property_create_enum);
 
 /**
- * drm_property_create - create a new bitmask property type
+ * drm_property_create_bitmask - create a new bitmask property type
  * @dev: drm device
  * @flags: flags specifying the property type
  * @name: name of the property
@@ -3242,19 +3509,28 @@
 struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 					 int flags, const char *name,
 					 const struct drm_prop_enum_list *props,
-					 int num_values)
+					 int num_props,
+					 uint64_t supported_bits)
 {
 	struct drm_property *property;
-	int i, ret;
+	int i, ret, index = 0;
+	int num_values = hweight64(supported_bits);
 
 	flags |= DRM_MODE_PROP_BITMASK;
 
 	property = drm_property_create(dev, flags, name, num_values);
 	if (!property)
 		return NULL;
+	for (i = 0; i < num_props; i++) {
+		if (!(supported_bits & (1ULL << props[i].type)))
+			continue;
 
-	for (i = 0; i < num_values; i++) {
-		ret = drm_property_add_enum(property, i,
+		if (WARN_ON(index >= num_values)) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+
+		ret = drm_property_add_enum(property, index++,
 				      props[i].type,
 				      props[i].name);
 		if (ret) {
@@ -3284,7 +3560,7 @@
 }
 
 /**
- * drm_property_create - create a new ranged property type
+ * drm_property_create_range - create a new ranged property type
  * @dev: drm device
  * @flags: flags specifying the property type
  * @name: name of the property
@@ -3703,6 +3979,25 @@
 	return ret;
 }
 
+/**
+ * drm_mode_connector_set_path_property - set the MST path blob on a connector
+ * @connector: drm connector
+ * @path: textual topology path (such as the "mst:..." strings built by the
+ *	MST helpers)
+ *
+ * Wraps @path in a blob property and sets it as the connector's "PATH"
+ * property so userspace can identify MST connectors.  Note that, as
+ * written, a previously set path blob is not freed before being replaced.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+					 char *path)
+{
+	struct drm_device *dev = connector->dev;
+	int ret, size;
+	size = strlen(path) + 1;
+
+	connector->path_blob_ptr = drm_property_create_blob(connector->dev,
+							    size, path);
+	if (!connector->path_blob_ptr)
+		return -EINVAL;
+
+	ret = drm_object_property_set_value(&connector->base,
+					    dev->mode_config.path_property,
+					    connector->path_blob_ptr->base.id);
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+
 /**
  * drm_mode_connector_update_edid_property - update the edid property of a connector
  * @connector: drm connector
@@ -3720,6 +4015,10 @@
 	struct drm_device *dev = connector->dev;
 	int ret, size;
 
+	/* ignore requests to set edid when overridden */
+	if (connector->override_edid)
+		return 0;
+
 	if (connector->edid_blob_ptr)
 		drm_property_destroy_blob(dev, connector->edid_blob_ptr);
 
@@ -4680,6 +4979,36 @@
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
 
 /**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * E.g. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
+ *                       BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
+ *                       BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
+ *
+ * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+				   unsigned int supported_rotations)
+{
+	if (rotation & ~supported_rotations) {
+		rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
+		rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
+	}
+
+	return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
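+
+/*
+ * Illustrative caller pattern (editor's sketch; the supported mask is a
+ * hypothetical driver capability):
+ *
+ *	rotation = drm_rotation_simplify(rotation,
+ *					 BIT(DRM_ROTATE_0) |
+ *					 BIT(DRM_ROTATE_180));
+ *	if (rotation & ~(BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_180)))
+ *		return -EINVAL;
+ */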
+
+/**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
  *
@@ -4797,3 +5126,21 @@
 	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+						       unsigned int supported_rotations)
+{
+	static const struct drm_prop_enum_list props[] = {
+		{ DRM_ROTATE_0,   "rotate-0" },
+		{ DRM_ROTATE_90,  "rotate-90" },
+		{ DRM_ROTATE_180, "rotate-180" },
+		{ DRM_ROTATE_270, "rotate-270" },
+		{ DRM_REFLECT_X,  "reflect-x" },
+		{ DRM_REFLECT_Y,  "reflect-y" },
+	};
+
+	return drm_property_create_bitmask(dev, 0, "rotation",
+					   props, ARRAY_SIZE(props),
+					   supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
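+
+/*
+ * A minimal attach sketch (editor's illustration; 'plane' is a hypothetical
+ * driver plane and rotate-0 the assumed initial value):
+ *
+ *	prop = drm_mode_create_rotation_property(dev,
+ *			BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_180));
+ *	if (prop)
+ *		drm_object_attach_property(&plane->base, prop,
+ *					   BIT(DRM_ROTATE_0));
+ */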
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 78b37f3..6c65a0a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -818,6 +818,7 @@
 	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
 				    &fb->bits_per_pixel);
 	fb->pixel_format = mode_cmd->pixel_format;
+	fb->flags = mode_cmd->flags;
 }
 EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
 
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index b4b51d4..13bd429 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <drm/drmP.h>
+#include <drm/drm_edid.h>
 
 #if defined(CONFIG_DEBUG_FS)
 
@@ -237,5 +238,186 @@
 	return 0;
 }
 
+static int connector_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	const char *status;
+
+	switch (connector->force) {
+	case DRM_FORCE_ON:
+		status = "on\n";
+		break;
+
+	case DRM_FORCE_ON_DIGITAL:
+		status = "digital\n";
+		break;
+
+	case DRM_FORCE_OFF:
+		status = "off\n";
+		break;
+
+	case DRM_FORCE_UNSPECIFIED:
+		status = "unspecified\n";
+		break;
+
+	default:
+		return 0;
+	}
+
+	seq_puts(m, status);
+
+	return 0;
+}
+
+static int connector_open(struct inode *inode, struct file *file)
+{
+	struct drm_connector *connector = inode->i_private;
+
+	return single_open(file, connector_show, connector);
+}
+
+static ssize_t connector_write(struct file *file, const char __user *ubuf,
+			       size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_connector *connector = m->private;
+	char buf[12];
+
+	if (len > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, ubuf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	/* sysfs_streq() tolerates the trailing newline that echo appends */
+	if (sysfs_streq(buf, "on"))
+		connector->force = DRM_FORCE_ON;
+	else if (sysfs_streq(buf, "digital"))
+		connector->force = DRM_FORCE_ON_DIGITAL;
+	else if (sysfs_streq(buf, "off"))
+		connector->force = DRM_FORCE_OFF;
+	else if (sysfs_streq(buf, "unspecified"))
+		connector->force = DRM_FORCE_UNSPECIFIED;
+	else
+		return -EINVAL;
+
+	return len;
+}
+
+static int edid_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct drm_property_blob *edid = connector->edid_blob_ptr;
+
+	if (connector->override_edid && edid)
+		seq_write(m, edid->data, edid->length);
+
+	return 0;
+}
+
+static int edid_open(struct inode *inode, struct file *file)
+{
+	struct drm_connector *connector = inode->i_private;
+
+	return single_open(file, edid_show, connector);
+}
+
+static ssize_t edid_write(struct file *file, const char __user *ubuf,
+			  size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_connector *connector = m->private;
+	char *buf;
+	struct edid *edid;
+	int ret;
+
+	buf = memdup_user(ubuf, len);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	edid = (struct edid *) buf;
+
+	if (len == 5 && !strncmp(buf, "reset", 5)) {
+		connector->override_edid = false;
+		ret = drm_mode_connector_update_edid_property(connector, NULL);
+	} else if (len < EDID_LENGTH ||
+		   EDID_LENGTH * (1 + edid->extensions) > len) {
+		ret = -EINVAL;
+	} else {
+		connector->override_edid = false;
+		ret = drm_mode_connector_update_edid_property(connector, edid);
+		if (!ret)
+			connector->override_edid = true;
+	}
+
+	kfree(buf);
+
+	return (ret) ? ret : len;
+}
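+
+/*
+ * Usage sketch for the two debugfs files set up below (editor's
+ * illustration; the dri minor index and connector name are assumptions):
+ *
+ *	# echo on > /sys/kernel/debug/dri/0/DP-1/force
+ *	# cat edid.bin > /sys/kernel/debug/dri/0/DP-1/edid_override
+ *	# echo -n reset > /sys/kernel/debug/dri/0/DP-1/edid_override
+ */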
+
+static const struct file_operations drm_edid_fops = {
+	.owner = THIS_MODULE,
+	.open = edid_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = edid_write,
+};
+
+static const struct file_operations drm_connector_fops = {
+	.owner = THIS_MODULE,
+	.open = connector_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = connector_write,
+};
+
+int drm_debugfs_connector_add(struct drm_connector *connector)
+{
+	struct drm_minor *minor = connector->dev->primary;
+	struct dentry *root, *ent;
+
+	if (!minor->debugfs_root)
+		return -1;
+
+	root = debugfs_create_dir(connector->name, minor->debugfs_root);
+	if (!root)
+		return -ENOMEM;
+
+	connector->debugfs_entry = root;
+
+	/* force */
+	ent = debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
+				  &drm_connector_fops);
+	if (!ent)
+		goto error;
+
+	/* edid */
+	ent = debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root,
+				  connector, &drm_edid_fops);
+	if (!ent)
+		goto error;
+
+	return 0;
+
+error:
+	debugfs_remove_recursive(connector->debugfs_entry);
+	connector->debugfs_entry = NULL;
+	return -ENOMEM;
+}
+
+void drm_debugfs_connector_remove(struct drm_connector *connector)
+{
+	if (!connector->debugfs_entry)
+		return;
+
+	debugfs_remove_recursive(connector->debugfs_entry);
+
+	connector->debugfs_entry = NULL;
+}
+
 #endif /* CONFIG_DEBUG_FS */
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
new file mode 100644
index 0000000..ac3c273
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -0,0 +1,2715 @@
+/*
+ * Copyright © 2014 Red Hat
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drmP.h>
+
+#include <drm/drm_fixed.h>
+
+/**
+ * DOC: dp mst helper
+ *
+ * These functions implement parts of the DisplayPort 1.2a MultiStream
+ * Transport protocol. The helpers provide a topology manager and a
+ * bandwidth manager, and encapsulate the sending and receiving of
+ * sideband messages.
+ */
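+
+/*
+ * A minimal bring-up sketch (editor's illustration; the argument values
+ * and the surrounding driver state are assumptions):
+ *
+ *	ret = drm_dp_mst_topology_mgr_init(&mgr, drm_dev->dev, &aux,
+ *					   16, 6, conn_base_id);
+ *	if (!ret)
+ *		ret = drm_dp_mst_topology_mgr_set_mst(&mgr, true);
+ */
+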
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+				  char *buf);
+static int test_calc_pbn_mode(void);
+
+static void drm_dp_put_port(struct drm_dp_mst_port *port);
+
+static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+				     int id,
+				     struct drm_dp_payload *payload);
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+				  struct drm_dp_mst_port *port,
+				  int offset, int size, u8 *bytes);
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+				    struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+					   struct drm_dp_mst_branch *mstb,
+					   struct drm_dp_mst_port *port);
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+				 u8 *guid);
+
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+/* sideband msg handling */
+static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
+{
+	u8 bitmask = 0x80;
+	u8 bitshift = 7;
+	u8 array_index = 0;
+	int number_of_bits = num_nibbles * 4;
+	u8 remainder = 0;
+
+	while (number_of_bits != 0) {
+		number_of_bits--;
+		remainder <<= 1;
+		remainder |= (data[array_index] & bitmask) >> bitshift;
+		bitmask >>= 1;
+		bitshift--;
+		if (bitmask == 0) {
+			bitmask = 0x80;
+			bitshift = 7;
+			array_index++;
+		}
+		if ((remainder & 0x10) == 0x10)
+			remainder ^= 0x13;
+	}
+
+	number_of_bits = 4;
+	while (number_of_bits != 0) {
+		number_of_bits--;
+		remainder <<= 1;
+		if ((remainder & 0x10) != 0)
+			remainder ^= 0x13;
+	}
+
+	return remainder;
+}
+
+static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
+{
+	u8 bitmask = 0x80;
+	u8 bitshift = 7;
+	u8 array_index = 0;
+	int number_of_bits = number_of_bytes * 8;
+	u16 remainder = 0;
+
+	while (number_of_bits != 0) {
+		number_of_bits--;
+		remainder <<= 1;
+		remainder |= (data[array_index] & bitmask) >> bitshift;
+		bitmask >>= 1;
+		bitshift--;
+		if (bitmask == 0) {
+			bitmask = 0x80;
+			bitshift = 7;
+			array_index++;
+		}
+		if ((remainder & 0x100) == 0x100)
+			remainder ^= 0xd5;
+	}
+
+	number_of_bits = 8;
+	while (number_of_bits != 0) {
+		number_of_bits--;
+		remainder <<= 1;
+		if ((remainder & 0x100) != 0)
+			remainder ^= 0xd5;
+	}
+
+	return remainder & 0xff;
+}
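+
+/*
+ * Editor's note: both routines above are plain bitwise CRCs - the header
+ * check is a CRC-4 with generator polynomial 0x13 (x^4 + x + 1) computed
+ * over nibbles, the data check a CRC-8 with the 0xd5 polynomial computed
+ * over whole bytes, each followed by flushing in zero bits equal to the
+ * CRC width.
+ */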
+
+static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
+{
+	u8 size = 3;
+	size += (hdr->lct / 2);
+	return size;
+}
+
+static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+					   u8 *buf, int *len)
+{
+	int idx = 0;
+	int i;
+	u8 crc4;
+	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
+	for (i = 0; i < (hdr->lct / 2); i++)
+		buf[idx++] = hdr->rad[i];
+	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
+		(hdr->msg_len & 0x3f);
+	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
+
+	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
+	buf[idx - 1] |= (crc4 & 0xf);
+
+	*len = idx;
+}
+
+static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+					   u8 *buf, int buflen, u8 *hdrlen)
+{
+	u8 crc4;
+	u8 len;
+	int i;
+	u8 idx;
+	if (buf[0] == 0)
+		return false;
+	len = 3;
+	len += ((buf[0] & 0xf0) >> 4) / 2;
+	if (len > buflen)
+		return false;
+	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
+
+	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
+		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
+		return false;
+	}
+
+	hdr->lct = (buf[0] & 0xf0) >> 4;
+	hdr->lcr = (buf[0] & 0xf);
+	idx = 1;
+	for (i = 0; i < (hdr->lct / 2); i++)
+		hdr->rad[i] = buf[idx++];
+	hdr->broadcast = (buf[idx] >> 7) & 0x1;
+	hdr->path_msg = (buf[idx] >> 6) & 0x1;
+	hdr->msg_len = buf[idx] & 0x3f;
+	idx++;
+	hdr->somt = (buf[idx] >> 7) & 0x1;
+	hdr->eomt = (buf[idx] >> 6) & 0x1;
+	hdr->seqno = (buf[idx] >> 4) & 0x1;
+	idx++;
+	*hdrlen = idx;
+	return true;
+}
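+
+/*
+ * Editor's summary of the sideband header layout handled above:
+ *
+ *	byte 0:       LCT (high nibble) | LCR (low nibble)
+ *	next LCT/2:   RAD bytes, two hops packed per byte
+ *	next byte:    broadcast (bit 7) | path_msg (bit 6) | msg_len (5:0)
+ *	last byte:    somt (bit 7) | eomt (bit 6) | seqno (bit 4) |
+ *	              CRC-4 of the preceding nibbles (3:0)
+ */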
+
+static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
+				       struct drm_dp_sideband_msg_tx *raw)
+{
+	int idx = 0;
+	int i;
+	u8 *buf = raw->msg;
+	buf[idx++] = req->req_type & 0x7f;
+
+	switch (req->req_type) {
+	case DP_ENUM_PATH_RESOURCES:
+		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+		idx++;
+		break;
+	case DP_ALLOCATE_PAYLOAD:
+		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
+			(req->u.allocate_payload.number_sdp_streams & 0xf);
+		idx++;
+		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
+		idx++;
+		buf[idx] = (req->u.allocate_payload.pbn >> 8);
+		idx++;
+		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
+		idx++;
+		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
+			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
+				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
+			idx++;
+		}
+		if (req->u.allocate_payload.number_sdp_streams & 1) {
+			i = req->u.allocate_payload.number_sdp_streams - 1;
+			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
+			idx++;
+		}
+		break;
+	case DP_QUERY_PAYLOAD:
+		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
+		idx++;
+		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
+		idx++;
+		break;
+	case DP_REMOTE_DPCD_READ:
+		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
+		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
+		idx++;
+		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
+		idx++;
+		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
+		idx++;
+		buf[idx] = (req->u.dpcd_read.num_bytes);
+		idx++;
+		break;
+
+	case DP_REMOTE_DPCD_WRITE:
+		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
+		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
+		idx++;
+		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
+		idx++;
+		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
+		idx++;
+		buf[idx] = (req->u.dpcd_write.num_bytes);
+		idx++;
+		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
+		idx += req->u.dpcd_write.num_bytes;
+		break;
+	case DP_REMOTE_I2C_READ:
+		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
+		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
+		idx++;
+		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
+			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
+			idx++;
+			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
+			idx++;
+			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
+			idx += req->u.i2c_read.transactions[i].num_bytes;
+
+			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
+			idx++;
+		}
+		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
+		idx++;
+		buf[idx] = (req->u.i2c_read.num_bytes_read);
+		idx++;
+		break;
+
+	case DP_REMOTE_I2C_WRITE:
+		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
+		idx++;
+		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
+		idx++;
+		buf[idx] = (req->u.i2c_write.num_bytes);
+		idx++;
+		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
+		idx += req->u.i2c_write.num_bytes;
+		break;
+	}
+	raw->cur_len = idx;
+}
+
+static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
+{
+	u8 crc4;
+	crc4 = drm_dp_msg_data_crc4(msg, len);
+	msg[len] = crc4;
+}
+
+static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
+					 struct drm_dp_sideband_msg_tx *raw)
+{
+	int idx = 0;
+	u8 *buf = raw->msg;
+
+	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
+
+	raw->cur_len = idx;
+}
+
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
+				      u8 *replybuf, u8 replybuflen, bool hdr)
+{
+	int ret;
+	u8 crc4;
+
+	if (hdr) {
+		u8 hdrlen;
+		struct drm_dp_sideband_msg_hdr recv_hdr;
+		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
+		if (ret == false) {
+			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
+			return false;
+		}
+
+		/* get length contained in this portion */
+		msg->curchunk_len = recv_hdr.msg_len;
+		msg->curchunk_hdrlen = hdrlen;
+
+		/* we have already gotten an SOMT - don't bother parsing */
+		if (recv_hdr.somt && msg->have_somt)
+			return false;
+
+		if (recv_hdr.somt) {
+			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
+			msg->have_somt = true;
+		}
+		if (recv_hdr.eomt)
+			msg->have_eomt = true;
+
+		/* copy the bytes for the remainder of this header chunk */
+		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
+		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
+	} else {
+		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+		msg->curchunk_idx += replybuflen;
+	}
+
+	if (msg->curchunk_idx >= msg->curchunk_len) {
+		/* do CRC */
+		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+		/* copy chunk into bigger msg */
+		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
+		msg->curlen += msg->curchunk_len - 1;
+	}
+	return true;
+}
+
+static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
+					       struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+	int i;
+	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
+	idx += 16;
+	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
+		if (raw->msg[idx] & 0x80)
+			repmsg->u.link_addr.ports[i].input_port = 1;
+
+		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
+		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
+
+		idx++;
+		if (idx > raw->curlen)
+			goto fail_len;
+		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
+		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
+		if (repmsg->u.link_addr.ports[i].input_port == 0)
+			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
+		idx++;
+		if (idx > raw->curlen)
+			goto fail_len;
+		if (repmsg->u.link_addr.ports[i].input_port == 0) {
+			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
+			idx++;
+			if (idx > raw->curlen)
+				goto fail_len;
+			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
+			idx += 16;
+			if (idx > raw->curlen)
+				goto fail_len;
+			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
+			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
+			idx++;
+
+		}
+		if (idx > raw->curlen)
+			goto fail_len;
+	}
+
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
+						   struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+	if (idx > raw->curlen)
+		goto fail_len;
+
+	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
+						      struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
+						      struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+
+	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
+	idx++;
+	/* TODO check */
+	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
+							  struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+	idx += 2;
+	if (idx > raw->curlen)
+		goto fail_len;
+	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+	idx += 2;
+	if (idx > raw->curlen)
+		goto fail_len;
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
+							  struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+	idx += 2;
+	if (idx > raw->curlen)
+		goto fail_len;
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
+						    struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
+	idx += 2;
+	if (idx > raw->curlen)
+		goto fail_len;
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
+					struct drm_dp_sideband_msg_reply_body *msg)
+{
+	memset(msg, 0, sizeof(*msg));
+	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
+	msg->req_type = (raw->msg[0] & 0x7f);
+
+	if (msg->reply_type) {
+		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
+		msg->u.nak.reason = raw->msg[17];
+		msg->u.nak.nak_data = raw->msg[18];
+		return false;
+	}
+
+	switch (msg->req_type) {
+	case DP_LINK_ADDRESS:
+		return drm_dp_sideband_parse_link_address(raw, msg);
+	case DP_QUERY_PAYLOAD:
+		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
+	case DP_REMOTE_DPCD_READ:
+		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
+	case DP_REMOTE_DPCD_WRITE:
+		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
+	case DP_REMOTE_I2C_READ:
+		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
+	case DP_ENUM_PATH_RESOURCES:
+		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
+	case DP_ALLOCATE_PAYLOAD:
+		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+	default:
+		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
+		return false;
+	}
+}
+
+static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
+							   struct drm_dp_sideband_msg_req_body *msg)
+{
+	int idx = 1;
+
+	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+
+	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
+	idx += 16;
+	if (idx > raw->curlen)
+		goto fail_len;
+
+	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
+	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
+	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
+	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
+	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
+	idx++;
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
+							   struct drm_dp_sideband_msg_req_body *msg)
+{
+	int idx = 1;
+
+	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
+	idx++;
+	if (idx > raw->curlen)
+		goto fail_len;
+
+	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
+	idx += 16;
+	if (idx > raw->curlen)
+		goto fail_len;
+
+	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
+	idx++;
+	return true;
+fail_len:
+	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
+	return false;
+}
+
+static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
+				      struct drm_dp_sideband_msg_req_body *msg)
+{
+	memset(msg, 0, sizeof(*msg));
+	msg->req_type = (raw->msg[0] & 0x7f);
+
+	switch (msg->req_type) {
+	case DP_CONNECTION_STATUS_NOTIFY:
+		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
+	case DP_RESOURCE_STATUS_NOTIFY:
+		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
+	default:
+		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
+		return false;
+	}
+}
+
+static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+{
+	struct drm_dp_sideband_msg_req_body req;
+
+	req.req_type = DP_REMOTE_DPCD_WRITE;
+	req.u.dpcd_write.port_number = port_num;
+	req.u.dpcd_write.dpcd_address = offset;
+	req.u.dpcd_write.num_bytes = num_bytes;
+	req.u.dpcd_write.bytes = bytes;
+	drm_dp_encode_sideband_req(&req, msg);
+
+	return 0;
+}
+
+static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+{
+	struct drm_dp_sideband_msg_req_body req;
+
+	req.req_type = DP_LINK_ADDRESS;
+	drm_dp_encode_sideband_req(&req, msg);
+	return 0;
+}
+
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+{
+	struct drm_dp_sideband_msg_req_body req;
+
+	req.req_type = DP_ENUM_PATH_RESOURCES;
+	req.u.port_num.port_number = port_num;
+	drm_dp_encode_sideband_req(&req, msg);
+	msg->path_msg = true;
+	return 0;
+}
+
+static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
+				  u8 vcpi, uint16_t pbn)
+{
+	struct drm_dp_sideband_msg_req_body req;
+	memset(&req, 0, sizeof(req));
+	req.req_type = DP_ALLOCATE_PAYLOAD;
+	req.u.allocate_payload.port_number = port_num;
+	req.u.allocate_payload.vcpi = vcpi;
+	req.u.allocate_payload.pbn = pbn;
+	drm_dp_encode_sideband_req(&req, msg);
+	msg->path_msg = true;
+	return 0;
+}
+
+static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
+					struct drm_dp_vcpi *vcpi)
+{
+	int ret;
+
+	mutex_lock(&mgr->payload_lock);
+	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
+	if (ret > mgr->max_payloads) {
+		ret = -EINVAL;
+		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
+		goto out_unlock;
+	}
+
+	set_bit(ret, &mgr->payload_mask);
+	vcpi->vcpi = ret;
+	mgr->proposed_vcpis[ret - 1] = vcpi;
+out_unlock:
+	mutex_unlock(&mgr->payload_lock);
+	return ret;
+}
+
+static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
+				      int id)
+{
+	if (id == 0)
+		return;
+
+	mutex_lock(&mgr->payload_lock);
+	DRM_DEBUG_KMS("putting payload %d\n", id);
+	clear_bit(id, &mgr->payload_mask);
+	mgr->proposed_vcpis[id - 1] = NULL;
+	mutex_unlock(&mgr->payload_lock);
+}
+
+static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
+			      struct drm_dp_sideband_msg_tx *txmsg)
+{
+	bool ret;
+	mutex_lock(&mgr->qlock);
+	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
+	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
+	mutex_unlock(&mgr->qlock);
+	return ret;
+}
+
+static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
+				    struct drm_dp_sideband_msg_tx *txmsg)
+{
+	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+	int ret;
+
+	ret = wait_event_timeout(mgr->tx_waitq,
+				 check_txmsg_state(mgr, txmsg),
+				 (4 * HZ));
+	mutex_lock(&mstb->mgr->qlock);
+	if (ret > 0) {
+		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
+			ret = -EIO;
+			goto out;
+		}
+	} else {
+		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
+
+		/* dump some state */
+		ret = -EIO;
+
+		/* remove from q */
+		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
+		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+			list_del(&txmsg->next);
+		}
+
+		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
+			mstb->tx_slots[txmsg->seqno] = NULL;
+		}
+	}
+out:
+	mutex_unlock(&mgr->qlock);
+
+	return ret;
+}
+
+static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
+{
+	struct drm_dp_mst_branch *mstb;
+
+	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
+	if (!mstb)
+		return NULL;
+
+	mstb->lct = lct;
+	if (lct > 1)
+		memcpy(mstb->rad, rad, lct / 2);
+	INIT_LIST_HEAD(&mstb->ports);
+	kref_init(&mstb->kref);
+	return mstb;
+}
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+	struct drm_dp_mst_port *port, *tmp;
+	bool wake_tx = false;
+
+	cancel_work_sync(&mstb->mgr->work);
+
+	/*
+	 * destroy all ports - don't need lock
+	 * as there are no more references to the mst branch
+	 * device at this point.
+	 */
+	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+		list_del(&port->next);
+		drm_dp_put_port(port);
+	}
+
+	/* drop any tx slots msg */
+	mutex_lock(&mstb->mgr->qlock);
+	if (mstb->tx_slots[0]) {
+		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+		mstb->tx_slots[0] = NULL;
+		wake_tx = true;
+	}
+	if (mstb->tx_slots[1]) {
+		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+		mstb->tx_slots[1] = NULL;
+		wake_tx = true;
+	}
+	mutex_unlock(&mstb->mgr->qlock);
+
+	if (wake_tx)
+		wake_up(&mstb->mgr->tx_waitq);
+	kfree(mstb);
+}
+
+static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+{
+	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+}
+
+
+static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
+{
+	switch (old_pdt) {
+	case DP_PEER_DEVICE_DP_LEGACY_CONV:
+	case DP_PEER_DEVICE_SST_SINK:
+		/* remove i2c over sideband */
+		drm_dp_mst_unregister_i2c_bus(&port->aux);
+		break;
+	case DP_PEER_DEVICE_MST_BRANCHING:
+		drm_dp_put_mst_branch_device(port->mstb);
+		port->mstb = NULL;
+		break;
+	}
+}
+
+static void drm_dp_destroy_port(struct kref *kref)
+{
+	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+	if (!port->input) {
+		port->vcpi.num_slots = 0;
+		if (port->connector)
+			(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
+		drm_dp_port_teardown_pdt(port, port->pdt);
+
+		if (port->vcpi.vcpi > 0)
+			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+	}
+	kfree(port);
+
+	(*mgr->cbs->hotplug)(mgr);
+}
+
+static void drm_dp_put_port(struct drm_dp_mst_port *port)
+{
+	kref_put(&port->kref, drm_dp_destroy_port);
+}
+
+static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+{
+	struct drm_dp_mst_port *port;
+	struct drm_dp_mst_branch *rmstb;
+	if (to_find == mstb) {
+		kref_get(&mstb->kref);
+		return mstb;
+	}
+	list_for_each_entry(port, &mstb->ports, next) {
+		if (port->mstb) {
+			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+			if (rmstb)
+				return rmstb;
+		}
+	}
+	return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+{
+	struct drm_dp_mst_branch *rmstb = NULL;
+	mutex_lock(&mgr->lock);
+	if (mgr->mst_primary)
+		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+	mutex_unlock(&mgr->lock);
+	return rmstb;
+}
+
+static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
+{
+	struct drm_dp_mst_port *port, *mport;
+
+	list_for_each_entry(port, &mstb->ports, next) {
+		if (port == to_find) {
+			kref_get(&port->kref);
+			return port;
+		}
+		if (port->mstb) {
+			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
+			if (mport)
+				return mport;
+		}
+	}
+	return NULL;
+}
+
+static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	struct drm_dp_mst_port *rport = NULL;
+	mutex_lock(&mgr->lock);
+	if (mgr->mst_primary)
+		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
+	mutex_unlock(&mgr->lock);
+	return rport;
+}
+
+static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
+{
+	struct drm_dp_mst_port *port;
+
+	list_for_each_entry(port, &mstb->ports, next) {
+		if (port->port_num == port_num) {
+			kref_get(&port->kref);
+			return port;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Calculate a new RAD for this MST branch device.
+ * If the parent has an LCT of 2 it has 1 nibble of RAD,
+ * if the parent has an LCT of 3 it has 2 nibbles of RAD, and so on.
+ */
+static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
+				 u8 *rad)
+{
+	int lct = port->parent->lct;
+	int shift = 4;
+	int idx = lct / 2;
+	if (lct > 1) {
+		memcpy(rad, port->parent->rad, idx);
+		shift = (lct % 2) ? 4 : 0;
+	} else
+		rad[0] = 0;
+
+	rad[idx] |= port->port_num << shift;
+	return lct + 1;
+}
+
+/*
+ * Returns true if a link address message should be sent for the new mstb.
+ */
+static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+{
+	int ret;
+	u8 rad[6], lct;
+	bool send_link = false;
+	switch (port->pdt) {
+	case DP_PEER_DEVICE_DP_LEGACY_CONV:
+	case DP_PEER_DEVICE_SST_SINK:
+		/* add i2c over sideband */
+		ret = drm_dp_mst_register_i2c_bus(&port->aux);
+		break;
+	case DP_PEER_DEVICE_MST_BRANCHING:
+		lct = drm_dp_calculate_rad(port, rad);
+
+		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
+		port->mstb->mgr = port->mgr;
+		port->mstb->port_parent = port;
+
+		send_link = true;
+		break;
+	}
+	return send_link;
+}
+
+static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
+				   struct drm_dp_mst_port *port)
+{
+	int ret;
+	if (port->dpcd_rev >= 0x12) {
+		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
+		if (!port->guid_valid) {
+			ret = drm_dp_send_dpcd_write(mstb->mgr,
+						     port,
+						     DP_GUID,
+						     16, port->guid);
+			port->guid_valid = true;
+		}
+	}
+}
+
+static void build_mst_prop_path(struct drm_dp_mst_port *port,
+				struct drm_dp_mst_branch *mstb,
+				char *proppath)
+{
+	int i;
+	char temp[8];
+	snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
+	for (i = 0; i < (mstb->lct - 1); i++) {
+		int shift = (i % 2) ? 0 : 4;
+		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
+		snprintf(temp, sizeof(temp), "-%d", port_num);
+		/* bound strncat by the space left, not the buffer size */
+		strncat(proppath, temp, 255 - strlen(proppath) - 1);
+	}
+	snprintf(temp, sizeof(temp), "-%d", port->port_num);
+	strncat(proppath, temp, 255 - strlen(proppath) - 1);
+}
+
+static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+			    struct device *dev,
+			    struct drm_dp_link_addr_reply_port *port_msg)
+{
+	struct drm_dp_mst_port *port;
+	bool ret;
+	bool created = false;
+	int old_pdt = 0;
+	int old_ddps = 0;
+	port = drm_dp_get_port(mstb, port_msg->port_number);
+	if (!port) {
+		port = kzalloc(sizeof(*port), GFP_KERNEL);
+		if (!port)
+			return;
+		kref_init(&port->kref);
+		port->parent = mstb;
+		port->port_num = port_msg->port_number;
+		port->mgr = mstb->mgr;
+		port->aux.name = "DPMST";
+		port->aux.dev = dev;
+		created = true;
+	} else {
+		old_pdt = port->pdt;
+		old_ddps = port->ddps;
+	}
+
+	port->pdt = port_msg->peer_device_type;
+	port->input = port_msg->input_port;
+	port->mcs = port_msg->mcs;
+	port->ddps = port_msg->ddps;
+	port->ldps = port_msg->legacy_device_plug_status;
+	port->dpcd_rev = port_msg->dpcd_revision;
+	port->num_sdp_streams = port_msg->num_sdp_streams;
+	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
+	memcpy(port->guid, port_msg->peer_guid, 16);
+
+	/* manage mstb port lists with mgr lock - take a reference
+	   for this list */
+	if (created) {
+		mutex_lock(&mstb->mgr->lock);
+		kref_get(&port->kref);
+		list_add(&port->next, &mstb->ports);
+		mutex_unlock(&mstb->mgr->lock);
+	}
+
+	if (old_ddps != port->ddps) {
+		if (port->ddps) {
+			drm_dp_check_port_guid(mstb, port);
+			if (!port->input)
+				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+		} else {
+			port->guid_valid = false;
+			port->available_pbn = 0;
+		}
+	}
+
+	if (old_pdt != port->pdt && !port->input) {
+		drm_dp_port_teardown_pdt(port, old_pdt);
+
+		ret = drm_dp_port_setup_pdt(port);
+		if (ret) {
+			drm_dp_send_link_address(mstb->mgr, port->mstb);
+			port->mstb->link_address_sent = true;
+		}
+	}
+
+	if (created && !port->input) {
+		char proppath[255];
+		build_mst_prop_path(port, mstb, proppath);
+		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+	}
+
+	/* put reference to this port */
+	drm_dp_put_port(port);
+}
+
+static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
+			       struct drm_dp_connection_status_notify *conn_stat)
+{
+	struct drm_dp_mst_port *port;
+	int old_pdt;
+	int old_ddps;
+	bool dowork = false;
+	port = drm_dp_get_port(mstb, conn_stat->port_number);
+	if (!port)
+		return;
+
+	old_ddps = port->ddps;
+	old_pdt = port->pdt;
+	port->pdt = conn_stat->peer_device_type;
+	port->mcs = conn_stat->message_capability_status;
+	port->ldps = conn_stat->legacy_device_plug_status;
+	port->ddps = conn_stat->displayport_device_plug_status;
+
+	if (old_ddps != port->ddps) {
+		if (port->ddps) {
+			drm_dp_check_port_guid(mstb, port);
+			dowork = true;
+		} else {
+			port->guid_valid = false;
+			port->available_pbn = 0;
+		}
+	}
+	if (old_pdt != port->pdt && !port->input) {
+		drm_dp_port_teardown_pdt(port, old_pdt);
+
+		if (drm_dp_port_setup_pdt(port))
+			dowork = true;
+	}
+
+	drm_dp_put_port(port);
+	if (dowork)
+		queue_work(system_long_wq, &mstb->mgr->work);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
+							       u8 lct, u8 *rad)
+{
+	struct drm_dp_mst_branch *mstb;
+	struct drm_dp_mst_port *port;
+	int i;
+	/* find the port by iterating down */
+	mstb = mgr->mst_primary;
+
+	for (i = 0; i < lct - 1; i++) {
+		int shift = (i % 2) ? 0 : 4;
+		int port_num = (rad[i / 2] >> shift) & 0xf;
+
+		list_for_each_entry(port, &mstb->ports, next) {
+			if (port->port_num == port_num) {
+				if (!port->mstb) {
+					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
+					return NULL;
+				}
+
+				mstb = port->mstb;
+				break;
+			}
+		}
+	}
+	kref_get(&mstb->kref);
+	return mstb;
+}
+
+static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+					       struct drm_dp_mst_branch *mstb)
+{
+	struct drm_dp_mst_port *port;
+
+	if (!mstb->link_address_sent) {
+		drm_dp_send_link_address(mgr, mstb);
+		mstb->link_address_sent = true;
+	}
+	list_for_each_entry(port, &mstb->ports, next) {
+		if (port->input)
+			continue;
+
+		if (!port->ddps)
+			continue;
+
+		if (!port->available_pbn)
+			drm_dp_send_enum_path_resources(mgr, mstb, port);
+
+		if (port->mstb)
+			drm_dp_check_and_send_link_address(mgr, port->mstb);
+	}
+}
+
+static void drm_dp_mst_link_probe_work(struct work_struct *work)
+{
+	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+
+	drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
+}
+
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+				 u8 *guid)
+{
+	static u8 zero_guid[16];
+
+	if (!memcmp(guid, zero_guid, 16)) {
+		u64 salt = get_jiffies_64();
+		memcpy(&guid[0], &salt, sizeof(u64));
+		memcpy(&guid[8], &salt, sizeof(u64));
+		return false;
+	}
+	return true;
+}
+
+#if 0
+static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+{
+	struct drm_dp_sideband_msg_req_body req;
+
+	req.req_type = DP_REMOTE_DPCD_READ;
+	req.u.dpcd_read.port_number = port_num;
+	req.u.dpcd_read.dpcd_address = offset;
+	req.u.dpcd_read.num_bytes = num_bytes;
+	drm_dp_encode_sideband_req(&req, msg);
+
+	return 0;
+}
+#endif
+
+static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
+				    bool up, u8 *msg, int len)
+{
+	int ret;
+	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
+	int tosend, total, offset;
+	int retries = 0;
+
+retry:
+	total = len;
+	offset = 0;
+	do {
+		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
+
+		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
+					&msg[offset],
+					tosend);
+		if (ret != tosend) {
+			if (ret == -EIO && retries < 5) {
+				retries++;
+				goto retry;
+			}
+			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
+			WARN(1, "fail\n");
+
+			return -EIO;
+		}
+		offset += tosend;
+		total -= tosend;
+	} while (total > 0);
+	return 0;
+}
+
+static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+				  struct drm_dp_sideband_msg_tx *txmsg)
+{
+	struct drm_dp_mst_branch *mstb = txmsg->dst;
+
+	/* no seqno assigned yet - pick a tx slot; fail if both are in use */
+	if (txmsg->seqno == -1) {
+		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
+			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
+			return -EAGAIN;
+		}
+		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
+			txmsg->seqno = mstb->last_seqno;
+			mstb->last_seqno ^= 1;
+		} else if (mstb->tx_slots[0] == NULL)
+			txmsg->seqno = 0;
+		else
+			txmsg->seqno = 1;
+		mstb->tx_slots[txmsg->seqno] = txmsg;
+	}
+	hdr->broadcast = 0;
+	hdr->path_msg = txmsg->path_msg;
+	hdr->lct = mstb->lct;
+	hdr->lcr = mstb->lct - 1;
+	if (mstb->lct > 1)
+		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
+	hdr->seqno = txmsg->seqno;
+	return 0;
+}
+
+/*
+ * process a single block of the next message in the sideband queue
+ */
+static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+				   struct drm_dp_sideband_msg_tx *txmsg,
+				   bool up)
+{
+	u8 chunk[48];
+	struct drm_dp_sideband_msg_hdr hdr;
+	int len, space, idx, tosend;
+	int ret;
+
+	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
+
+	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
+		txmsg->seqno = -1;
+		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
+	}
+
+	/* make hdr from dst mst - for replies use seqno
+	   otherwise assign one */
+	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
+	if (ret < 0)
+		return ret;
+
+	/* amount left to send in this message */
+	len = txmsg->cur_len - txmsg->cur_offset;
+
+	/* 48 = max sideband msg size; subtract 1 byte for the data CRC and the header bytes */
+	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
+
+	tosend = min(len, space);
+	if (len == txmsg->cur_len)
+		hdr.somt = 1;
+	if (space >= len)
+		hdr.eomt = 1;
+
+	hdr.msg_len = tosend + 1;
+	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
+	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
+	/* add crc at end */
+	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
+	idx += tosend + 1;
+
+	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
+	if (ret) {
+		DRM_DEBUG_KMS("sideband msg failed to send\n");
+		return ret;
+	}
+
+	txmsg->cur_offset += tosend;
+	if (txmsg->cur_offset == txmsg->cur_len) {
+		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
+		return 1;
+	}
+	return 0;
+}
+
+/* must be called holding qlock */
+static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_dp_sideband_msg_tx *txmsg;
+	int ret;
+
+	/* construct a chunk from the first msg in the tx_msg queue */
+	if (list_empty(&mgr->tx_msg_downq)) {
+		mgr->tx_down_in_progress = false;
+		return;
+	}
+	mgr->tx_down_in_progress = true;
+
+	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+	ret = process_single_tx_qlock(mgr, txmsg, false);
+	if (ret == 1) {
+		/* txmsg is sent; it should be in the slots now */
+		list_del(&txmsg->next);
+	} else if (ret) {
+		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+		list_del(&txmsg->next);
+		if (txmsg->seqno != -1)
+			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+		wake_up(&mgr->tx_waitq);
+	}
+	if (list_empty(&mgr->tx_msg_downq)) {
+		mgr->tx_down_in_progress = false;
+		return;
+	}
+}
+
+/* called holding qlock */
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_dp_sideband_msg_tx *txmsg;
+	int ret;
+
+	/* construct a chunk from the first msg in the tx_msg queue */
+	if (list_empty(&mgr->tx_msg_upq)) {
+		mgr->tx_up_in_progress = false;
+		return;
+	}
+
+	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
+	ret = process_single_tx_qlock(mgr, txmsg, true);
+	if (ret == 1) {
+		/* up txmsgs aren't put in slots - so free once sent */
+		list_del(&txmsg->next);
+		kfree(txmsg);
+	} else if (ret) {
+		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+	}
+	mgr->tx_up_in_progress = true;
+}
+
+static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_sideband_msg_tx *txmsg)
+{
+	mutex_lock(&mgr->qlock);
+	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
+	if (!mgr->tx_down_in_progress)
+		process_single_down_tx_qlock(mgr);
+	mutex_unlock(&mgr->qlock);
+}
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+				    struct drm_dp_mst_branch *mstb)
+{
+	int len;
+	struct drm_dp_sideband_msg_tx *txmsg;
+	int ret;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg)
+		return -ENOMEM;
+
+	txmsg->dst = mstb;
+	len = build_link_address(txmsg);
+
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret > 0) {
+		int i;
+
+		if (txmsg->reply.reply_type == 1) {
+			DRM_DEBUG_KMS("link address nak received\n");
+		} else {
+			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
+			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
+				       txmsg->reply.u.link_addr.ports[i].input_port,
+				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
+				       txmsg->reply.u.link_addr.ports[i].port_number,
+				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
+				       txmsg->reply.u.link_addr.ports[i].mcs,
+				       txmsg->reply.u.link_addr.ports[i].ddps,
+				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
+				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
+				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
+			}
+			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
+			}
+			(*mgr->cbs->hotplug)(mgr);
+		}
+	} else {
+		DRM_DEBUG_KMS("link address failed %d\n", ret);
+	}
+
+	kfree(txmsg);
+	return 0;
+}
+
+static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+					   struct drm_dp_mst_branch *mstb,
+					   struct drm_dp_mst_port *port)
+{
+	int len;
+	struct drm_dp_sideband_msg_tx *txmsg;
+	int ret;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg)
+		return -ENOMEM;
+
+	txmsg->dst = mstb;
+	len = build_enum_path_resources(txmsg, port->port_num);
+
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret > 0) {
+		if (txmsg->reply.reply_type == 1) {
+			DRM_DEBUG_KMS("enum path resources nak received\n");
+		} else {
+			if (port->port_num != txmsg->reply.u.path_resources.port_number)
+				DRM_ERROR("got incorrect port in response\n");
+			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
+			       txmsg->reply.u.path_resources.avail_payload_bw_number);
+			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+		}
+	}
+
+	kfree(txmsg);
+	return 0;
+}
+
+static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+				   struct drm_dp_mst_port *port,
+				   int id,
+				   int pbn)
+{
+	struct drm_dp_sideband_msg_tx *txmsg;
+	struct drm_dp_mst_branch *mstb;
+	int len, ret;
+
+	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	if (!mstb)
+		return -EINVAL;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg) {
+		ret = -ENOMEM;
+		goto fail_put;
+	}
+
+	txmsg->dst = mstb;
+	len = build_allocate_payload(txmsg, port->port_num,
+				     id,
+				     pbn);
+
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret > 0) {
+		if (txmsg->reply.reply_type == 1)
+			ret = -EINVAL;
+		else
+			ret = 0;
+	}
+	kfree(txmsg);
+fail_put:
+	drm_dp_put_mst_branch_device(mstb);
+	return ret;
+}
+
+static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+				       int id,
+				       struct drm_dp_payload *payload)
+{
+	int ret;
+
+	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
+	if (ret < 0) {
+		payload->payload_state = 0;
+		return ret;
+	}
+	payload->payload_state = DP_PAYLOAD_LOCAL;
+	return 0;
+}
+
+static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+				       struct drm_dp_mst_port *port,
+				       int id,
+				       struct drm_dp_payload *payload)
+{
+	int ret;
+	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
+	if (ret < 0)
+		return ret;
+	payload->payload_state = DP_PAYLOAD_REMOTE;
+	return ret;
+}
+
+static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+					struct drm_dp_mst_port *port,
+					int id,
+					struct drm_dp_payload *payload)
+{
+	DRM_DEBUG_KMS("\n");
+	/* it's okay for these to fail */
+	if (port)
+		drm_dp_payload_send_msg(mgr, port, id, 0);
+
+	drm_dp_dpcd_write_payload(mgr, id, payload);
+	payload->payload_state = 0;
+	return 0;
+}
+
+static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+					int id,
+					struct drm_dp_payload *payload)
+{
+	payload->payload_state = 0;
+	return 0;
+}
+
+/**
+ * drm_dp_update_payload_part1() - Execute payload update part 1
+ * @mgr: manager to use.
+ *
+ * This iterates over all proposed virtual channels, and tries to
+ * allocate space in the link for them. For 0->slots transitions,
+ * this step just writes the VCPI to the MST device. For slots->0
+ * transitions, this writes the updated VCPIs and removes the
+ * remote VC payloads.
+ *
+ * After calling this, the driver should generate ACT and payload
+ * packets.
+ */
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int i;
+	int cur_slots = 1;
+	struct drm_dp_payload req_payload;
+	struct drm_dp_mst_port *port;
+
+	mutex_lock(&mgr->payload_lock);
+	for (i = 0; i < mgr->max_payloads; i++) {
+		/* solve the current payloads - compare to the hw ones
+		   - update the hw view */
+		req_payload.start_slot = cur_slots;
+		if (mgr->proposed_vcpis[i]) {
+			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+		} else {
+			port = NULL;
+			req_payload.num_slots = 0;
+		}
+		/* work out what is required to happen with this payload */
+		if (mgr->payloads[i].start_slot != req_payload.start_slot ||
+		    mgr->payloads[i].num_slots != req_payload.num_slots) {
+
+			/* need to push an update for this payload */
+			if (req_payload.num_slots) {
+				drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
+				mgr->payloads[i].num_slots = req_payload.num_slots;
+			} else if (mgr->payloads[i].num_slots) {
+				mgr->payloads[i].num_slots = 0;
+				drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
+				req_payload.payload_state = mgr->payloads[i].payload_state;
+			} else
+				req_payload.payload_state = 0;
+
+			mgr->payloads[i].start_slot = req_payload.start_slot;
+			mgr->payloads[i].payload_state = req_payload.payload_state;
+		}
+		cur_slots += req_payload.num_slots;
+	}
+	mutex_unlock(&mgr->payload_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_update_payload_part1);
+
+/**
+ * drm_dp_update_payload_part2() - Execute payload update part 2
+ * @mgr: manager to use.
+ *
+ * This iterates over all proposed virtual channels, and tries to
+ * allocate space in the link for them. For 0->slots transitions,
+ * this step writes the remote VC payload commands. For slots->0
+ * this just resets some internal state.
+ */
+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_dp_mst_port *port;
+	int i;
+	int ret = 0;
+	mutex_lock(&mgr->payload_lock);
+	for (i = 0; i < mgr->max_payloads; i++) {
+
+		if (!mgr->proposed_vcpis[i])
+			continue;
+
+		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+
+		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
+		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
+			ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
+		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
+			ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
+		}
+		if (ret) {
+			mutex_unlock(&mgr->payload_lock);
+			return ret;
+		}
+	}
+	mutex_unlock(&mgr->payload_lock);
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_update_payload_part2);
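+
+/*
+ * Usage sketch (illustrative only, not built): part1 and part2 are split
+ * so the driver can trigger and wait for the allocation change trigger
+ * (ACT) in between; the example_hw_send_act() hook below is hypothetical.
+ */
+#if 0
+static void example_mst_payload_update(struct drm_dp_mst_topology_mgr *mgr)
+{
+	drm_dp_update_payload_part1(mgr);	/* write VCPIs to branch/DPCD */
+	example_hw_send_act(mgr);		/* hypothetical: make hw emit ACT */
+	drm_dp_check_act_status(mgr);		/* poll DPCD for ACT handled */
+	drm_dp_update_payload_part2(mgr);	/* send remote payload messages */
+}
+#endif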
+
+#if 0 /* unused as of yet */
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_port *port,
+				 int offset, int size)
+{
+	int len;
+	struct drm_dp_sideband_msg_tx *txmsg;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg)
+		return -ENOMEM;
+
+	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
+	txmsg->dst = port->parent;
+
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	return 0;
+}
+#endif
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+				  struct drm_dp_mst_port *port,
+				  int offset, int size, u8 *bytes)
+{
+	int len;
+	int ret;
+	struct drm_dp_sideband_msg_tx *txmsg;
+	struct drm_dp_mst_branch *mstb;
+
+	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	if (!mstb)
+		return -EINVAL;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg) {
+		ret = -ENOMEM;
+		goto fail_put;
+	}
+
+	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+	txmsg->dst = mstb;
+
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret > 0) {
+		if (txmsg->reply.reply_type == 1)
+			ret = -EINVAL;
+		else
+			ret = 0;
+	}
+	kfree(txmsg);
+fail_put:
+	drm_dp_put_mst_branch_device(mstb);
+	return ret;
+}
+
+static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
+{
+	struct drm_dp_sideband_msg_reply_body reply;
+
+	reply.reply_type = 1;
+	reply.req_type = req_type;
+	drm_dp_encode_sideband_reply(&reply, msg);
+	return 0;
+}
+
+static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+				    struct drm_dp_mst_branch *mstb,
+				    int req_type, int seqno, bool broadcast)
+{
+	struct drm_dp_sideband_msg_tx *txmsg;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg)
+		return -ENOMEM;
+
+	txmsg->dst = mstb;
+	txmsg->seqno = seqno;
+	drm_dp_encode_up_ack_reply(txmsg, req_type);
+
+	mutex_lock(&mgr->qlock);
+	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
+	if (!mgr->tx_up_in_progress) {
+		process_single_up_tx_qlock(mgr);
+	}
+	mutex_unlock(&mgr->qlock);
+	return 0;
+}
+
+static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
+{
+	switch (dp_link_bw) {
+	case DP_LINK_BW_1_62:
+		return 3 * dp_link_count;
+	case DP_LINK_BW_2_7:
+		return 5 * dp_link_count;
+	case DP_LINK_BW_5_4:
+		return 10 * dp_link_count;
+	}
+	return 0;
+}
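+
+/*
+ * Worked example (sketch): a 2.7Gbps link with 4 lanes yields 5 * 4 = 20 PBN
+ * per timeslot, the same as a 5.4Gbps link with 2 lanes (10 * 2 = 20).
+ */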
+
+/**
+ * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
+ * @mgr: manager to set state for
+ * @mst_state: true to enable MST on this connector - false to disable.
+ *
+ * This is called by the driver when it detects an MST capable device plugged
+ * into a DP MST capable port, or when a DP MST capable device is unplugged.
+ */
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
+{
+	int ret = 0;
+	struct drm_dp_mst_branch *mstb = NULL;
+
+	mutex_lock(&mgr->lock);
+	if (mst_state == mgr->mst_state)
+		goto out_unlock;
+
+	mgr->mst_state = mst_state;
+	/* set the device into MST mode */
+	if (mst_state) {
+		WARN_ON(mgr->mst_primary);
+
+		/* get dpcd info */
+		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+		if (ret != DP_RECEIVER_CAP_SIZE) {
+			DRM_DEBUG_KMS("failed to read DPCD\n");
+			goto out_unlock;
+		}
+
+		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+		mgr->total_pbn = 2560;
+		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
+		mgr->avail_slots = mgr->total_slots;
+
+		/* add initial branch device at LCT 1 */
+		mstb = drm_dp_add_mst_branch_device(1, NULL);
+		if (mstb == NULL) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		mstb->mgr = mgr;
+
+		/* give this the main reference */
+		mgr->mst_primary = mstb;
+		kref_get(&mgr->mst_primary->kref);
+
+		{
+			struct drm_dp_payload reset_pay;
+			reset_pay.start_slot = 0;
+			reset_pay.num_slots = 0x3f;
+			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+		}
+
+		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+		if (ret < 0)
+			goto out_unlock;
+
+		/* sort out guid */
+		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
+		if (ret != 16) {
+			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
+			goto out_unlock;
+		}
+
+		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
+		if (!mgr->guid_valid) {
+			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
+			mgr->guid_valid = true;
+		}
+
+		queue_work(system_long_wq, &mgr->work);
+
+		ret = 0;
+	} else {
+		/* disable MST on the device */
+		mstb = mgr->mst_primary;
+		mgr->mst_primary = NULL;
+		/* this can fail if the device is gone */
+		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+		ret = 0;
+		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+		mgr->payload_mask = 0;
+		set_bit(0, &mgr->payload_mask);
+	}
+
+out_unlock:
+	mutex_unlock(&mgr->lock);
+	if (mstb)
+		drm_dp_put_mst_branch_device(mstb);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
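+
+/*
+ * Usage sketch (illustrative, not built): a driver's long-pulse hotplug
+ * path would flip MST mode based on the DPCD MST cap; the example_*()
+ * helper is hypothetical.
+ */
+#if 0
+static void example_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+	if (example_sink_has_mst_cap(mgr))	/* hypothetical DPCD cap check */
+		drm_dp_mst_topology_mgr_set_mst(mgr, true);
+	else
+		drm_dp_mst_topology_mgr_set_mst(mgr, false);
+}
+#endif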
+
+/**
+ * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
+ * @mgr: manager to suspend
+ *
+ * This function tells the MST device that we can't handle UP messages
+ * anymore. This should stop it from sending any since we are suspended.
+ */
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
+{
+	mutex_lock(&mgr->lock);
+	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
+	mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
+
+/**
+ * drm_dp_mst_topology_mgr_resume() - resume the MST manager
+ * @mgr: manager to resume
+ *
+ * This will fetch the DPCD to check that the device is still there;
+ * if it is, it will rewrite the MSTM control bits and return 0.
+ *
+ * If the device has gone away, this returns -1 and the driver should
+ * do a full MST reprobe, in case we were undocked.
+ */
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int ret = 0;
+
+	mutex_lock(&mgr->lock);
+
+	if (mgr->mst_primary) {
+		int sret;
+		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+		if (sret != DP_RECEIVER_CAP_SIZE) {
+			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+
+		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+out_unlock:
+	mutex_unlock(&mgr->lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
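+
+/*
+ * Usage sketch (illustrative, not built): on resume, fall back to a full
+ * reprobe when the device went away; example_full_reprobe() is hypothetical.
+ */
+#if 0
+static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
+{
+	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
+		drm_dp_mst_topology_mgr_set_mst(mgr, false);
+		example_full_reprobe(mgr);	/* hypothetical: redo detection */
+	}
+}
+#endif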
+
+static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+{
+	int len;
+	u8 replyblock[32];
+	int replylen, origlen, curreply;
+	int ret;
+	struct drm_dp_sideband_msg_rx *msg;
+	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
+	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+
+	len = min(mgr->max_dpcd_transaction_bytes, 16);
+	ret = drm_dp_dpcd_read(mgr->aux, basereg,
+			       replyblock, len);
+	if (ret != len) {
+		DRM_DEBUG_KMS("failed to read DPCD sideband msg %d %d\n", len, ret);
+		return;
+	}
+	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+	if (!ret) {
+		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
+		return;
+	}
+	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
+
+	origlen = replylen;
+	replylen -= len;
+	curreply = len;
+	while (replylen > 0) {
+		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
+		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
+				    replyblock, len);
+		if (ret != len)
+			DRM_DEBUG_KMS("failed to read a chunk\n");
+		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+		if (!ret)
+			DRM_DEBUG_KMS("failed to build sideband msg\n");
+		curreply += len;
+		replylen -= len;
+	}
+}
+
+static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int ret = 0;
+
+	drm_dp_get_one_sb_msg(mgr, false);
+
+	if (mgr->down_rep_recv.have_eomt) {
+		struct drm_dp_sideband_msg_tx *txmsg;
+		struct drm_dp_mst_branch *mstb;
+		int slot = -1;
+		mstb = drm_dp_get_mst_branch_device(mgr,
+						    mgr->down_rep_recv.initial_hdr.lct,
+						    mgr->down_rep_recv.initial_hdr.rad);
+
+		if (!mstb) {
+			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
+			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+			return 0;
+		}
+
+		/* find the message */
+		slot = mgr->down_rep_recv.initial_hdr.seqno;
+		mutex_lock(&mgr->qlock);
+		txmsg = mstb->tx_slots[slot];
+		/* remove from slots */
+		mutex_unlock(&mgr->qlock);
+
+		if (!txmsg) {
+			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+				      mstb,
+				      mgr->down_rep_recv.initial_hdr.seqno,
+				      mgr->down_rep_recv.initial_hdr.lct,
+				      mgr->down_rep_recv.initial_hdr.rad[0],
+				      mgr->down_rep_recv.msg[0]);
+			drm_dp_put_mst_branch_device(mstb);
+			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+			return 0;
+		}
+
+		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+		if (txmsg->reply.reply_type == 1) {
+			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
+		}
+
+		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+
+		mutex_lock(&mgr->qlock);
+		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+		mstb->tx_slots[slot] = NULL;
+		mutex_unlock(&mgr->qlock);
+
+		/* drop the ref only after we're done touching mstb->tx_slots */
+		drm_dp_put_mst_branch_device(mstb);
+
+		wake_up(&mgr->tx_waitq);
+	}
+	return ret;
+}
+
+static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int ret = 0;
+	drm_dp_get_one_sb_msg(mgr, true);
+
+	if (mgr->up_req_recv.have_eomt) {
+		struct drm_dp_sideband_msg_req_body msg;
+		struct drm_dp_mst_branch *mstb;
+		bool seqno;
+		mstb = drm_dp_get_mst_branch_device(mgr,
+						    mgr->up_req_recv.initial_hdr.lct,
+						    mgr->up_req_recv.initial_hdr.rad);
+		if (!mstb) {
+			DRM_DEBUG_KMS("Got MST up request from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+			return 0;
+		}
+
+		seqno = mgr->up_req_recv.initial_hdr.seqno;
+		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+
+		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+			drm_dp_update_port(mstb, &msg.u.conn_stat);
+			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+			(*mgr->cbs->hotplug)(mgr);
+
+		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+		}
+
+		drm_dp_put_mst_branch_device(mstb);
+		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+	}
+	return ret;
+}
+
+/**
+ * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
+ * @mgr: manager to notify irq for.
+ * @esi: 4 bytes from SINK_COUNT_ESI
+ * @handled: set to true if the topology manager consumed the event
+ *
+ * This should be called from the driver when it detects a short IRQ,
+ * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
+ * topology manager will process the sideband messages received as a result
+ * of this.
+ */
+int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
+{
+	int ret = 0;
+	int sc;
+	*handled = false;
+	sc = esi[0] & 0x3f;
+
+	if (sc != mgr->sink_count) {
+		mgr->sink_count = sc;
+		*handled = true;
+	}
+
+	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
+		ret = drm_dp_mst_handle_down_rep(mgr);
+		*handled = true;
+	}
+
+	if (esi[1] & DP_UP_REQ_MSG_RDY) {
+		ret |= drm_dp_mst_handle_up_req(mgr);
+		*handled = true;
+	}
+
+	drm_dp_mst_kick_tx(mgr);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
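+
+/*
+ * Usage sketch (illustrative, not built): a short-pulse IRQ handler reads
+ * the ESI bytes, lets the manager process them and acks what was handled.
+ */
+#if 0
+static void example_mst_short_pulse(struct drm_dp_mst_topology_mgr *mgr)
+{
+	u8 esi[4];
+	bool handled;
+
+	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
+		return;
+	drm_dp_mst_hpd_irq(mgr, esi, &handled);
+	if (handled)	/* ack the serviced event bits */
+		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+}
+#endif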
+
+/**
+ * drm_dp_mst_detect_port() - get connection status for an MST port
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port
+ *
+ * This returns the current connection state for a port. It validates the
+ * port pointer still exists so the caller doesn't require a reference.
+ */
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	enum drm_connector_status status = connector_status_disconnected;
+
+	/* we need to search for the port in the mgr in case it's gone */
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return connector_status_disconnected;
+
+	if (!port->ddps)
+		goto out;
+
+	switch (port->pdt) {
+	case DP_PEER_DEVICE_NONE:
+	case DP_PEER_DEVICE_MST_BRANCHING:
+		break;
+
+	case DP_PEER_DEVICE_SST_SINK:
+		status = connector_status_connected;
+		break;
+	case DP_PEER_DEVICE_DP_LEGACY_CONV:
+		if (port->ldps)
+			status = connector_status_connected;
+		break;
+	}
+out:
+	drm_dp_put_port(port);
+	return status;
+}
+EXPORT_SYMBOL(drm_dp_mst_detect_port);
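+
+/*
+ * Usage sketch (illustrative, not built): an MST connector's ->detect()
+ * hook forwards straight to the topology manager; the example connector
+ * type and its fields are hypothetical.
+ */
+#if 0
+static enum drm_connector_status
+example_mst_detect(struct drm_connector *connector, bool force)
+{
+	struct example_mst_connector *c = to_example_mst_connector(connector);
+
+	return drm_dp_mst_detect_port(c->mgr, c->port);
+}
+#endif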
+
+/**
+ * drm_dp_mst_get_edid() - get EDID for an MST port
+ * @connector: toplevel connector to get EDID for
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This returns an EDID for the port connected to a connector.
+ * It validates the pointer still exists so the caller doesn't require a
+ * reference.
+ */
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	struct edid *edid = NULL;
+
+	/* we need to search for the port in the mgr in case it's gone */
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return NULL;
+
+	edid = drm_get_edid(connector, &port->aux.ddc);
+	drm_dp_put_port(port);
+	return edid;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_edid);
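+
+/*
+ * Usage sketch (illustrative, not built): ->get_modes() pulls the EDID over
+ * sideband i2c via the validated port; example_* names are hypothetical.
+ */
+#if 0
+static int example_mst_get_modes(struct drm_connector *connector)
+{
+	struct example_mst_connector *c = to_example_mst_connector(connector);
+	struct edid *edid;
+	int ret;
+
+	edid = drm_dp_mst_get_edid(connector, c->mgr, c->port);
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
+	return ret;
+}
+#endif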
+
+/**
+ * drm_dp_find_vcpi_slots() - find slots for this PBN value
+ * @mgr: manager to use
+ * @pbn: payload bandwidth to convert into slots.
+ *
+ * Returns the number of slots needed, or -ENOSPC if @pbn does not fit.
+ */
+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
+			   int pbn)
+{
+	int num_slots;
+
+	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+	if (num_slots > mgr->avail_slots)
+		return -ENOSPC;
+	return num_slots;
+}
+EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
+
+static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+			    struct drm_dp_vcpi *vcpi, int pbn)
+{
+	int num_slots;
+	int ret;
+
+	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+	if (num_slots > mgr->avail_slots)
+		return -ENOSPC;
+
+	vcpi->pbn = pbn;
+	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
+	vcpi->num_slots = num_slots;
+
+	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+/**
+ * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
+ * @mgr: manager for this port
+ * @port: port to allocate a virtual channel for.
+ * @pbn: payload bandwidth number to request
+ * @slots: returned number of slots for this PBN.
+ *
+ * Returns true if the VCPI was allocated (or already matched), false on
+ * failure.
+ */
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
+{
+	int ret;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return false;
+
+	if (port->vcpi.vcpi > 0) {
+		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+		if (pbn == port->vcpi.pbn) {
+			*slots = port->vcpi.num_slots;
+			/* drop the lookup reference before returning */
+			drm_dp_put_port(port);
+			return true;
+		}
+	}
+
+	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
+	if (ret) {
+		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
+		goto out;
+	}
+	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
+	*slots = port->vcpi.num_slots;
+
+	drm_dp_put_port(port);
+	return true;
+out:
+	/* the validated reference must also be dropped on failure */
+	drm_dp_put_port(port);
+	return false;
+}
+EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
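+
+/*
+ * Usage sketch (illustrative, not built): during compute_config the driver
+ * converts the mode's bandwidth into PBN and requests a virtual channel;
+ * the 24bpp value here is just an example.
+ */
+#if 0
+static int example_mst_compute_config(struct drm_dp_mst_topology_mgr *mgr,
+				      struct drm_dp_mst_port *port,
+				      struct drm_display_mode *mode)
+{
+	int pbn = drm_dp_calc_pbn_mode(mode->clock, 24);
+	int slots;
+
+	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
+		return -EINVAL;
+	return slots;
+}
+#endif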
+
+/**
+ * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This just resets the number of slots for the port's VCPI for later programming.
+ */
+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return;
+	port->vcpi.num_slots = 0;
+	drm_dp_put_port(port);
+}
+EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
+
+/**
+ * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
+ * @mgr: manager for this port
+ * @port: unverified port to deallocate vcpi for
+ */
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return;
+
+	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+	port->vcpi.num_slots = 0;
+	port->vcpi.pbn = 0;
+	port->vcpi.aligned_pbn = 0;
+	port->vcpi.vcpi = 0;
+	drm_dp_put_port(port);
+}
+EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
+
+static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+				     int id, struct drm_dp_payload *payload)
+{
+	u8 payload_alloc[3], status;
+	int ret;
+	int retries = 0;
+
+	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+			   DP_PAYLOAD_TABLE_UPDATED);
+
+	payload_alloc[0] = id;
+	payload_alloc[1] = payload->start_slot;
+	payload_alloc[2] = payload->num_slots;
+
+	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+	if (ret != 3) {
+		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
+		goto fail;
+	}
+
+retry:
+	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+		goto fail;
+	}
+
+	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
+		retries++;
+		if (retries < 20) {
+			usleep_range(10000, 20000);
+			goto retry;
+		}
+		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
+		ret = -EINVAL;
+		goto fail;
+	}
+	ret = 0;
+fail:
+	return ret;
+}
+
+/**
+ * drm_dp_check_act_status() - Check ACT handled status.
+ * @mgr: manager to use
+ *
+ * Check the payload status bits in the DPCD for ACT handled completion.
+ */
+int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+	u8 status;
+	int ret;
+	int count = 0;
+
+	do {
+		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+
+		if (ret < 0) {
+			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+			goto fail;
+		}
+
+		if (status & DP_PAYLOAD_ACT_HANDLED)
+			break;
+		count++;
+		udelay(100);
+
+	} while (count < 30);
+
+	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
+		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
+		ret = -EINVAL;
+		goto fail;
+	}
+	return 0;
+fail:
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_check_act_status);
+
+/**
+ * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
+ * @clock: dot clock for the mode
+ * @bpp: bpp for the mode.
+ *
+ * This uses the formula in the spec to calculate the PBN value for a mode.
+ */
+int drm_dp_calc_pbn_mode(int clock, int bpp)
+{
+	fixed20_12 pix_bw;
+	fixed20_12 fbpp;
+	fixed20_12 result;
+	fixed20_12 margin, tmp;
+	u32 res;
+
+	pix_bw.full = dfixed_const(clock);
+	fbpp.full = dfixed_const(bpp);
+	tmp.full = dfixed_const(8);
+	fbpp.full = dfixed_div(fbpp, tmp);
+
+	result.full = dfixed_mul(pix_bw, fbpp);
+	margin.full = dfixed_const(54);
+	tmp.full = dfixed_const(64);
+	margin.full = dfixed_div(margin, tmp);
+	result.full = dfixed_div(result, margin);
+
+	margin.full = dfixed_const(1006);
+	tmp.full = dfixed_const(1000);
+	margin.full = dfixed_div(margin, tmp);
+	result.full = dfixed_mul(result, margin);
+
+	result.full = dfixed_div(result, tmp);
+	result.full = dfixed_ceil(result);
+	res = dfixed_trunc(result);
+	return res;
+}
+EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
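+
+/*
+ * Worked example: a 154MHz, 30bpp mode gives
+ * 154000 * (30 / 8) * (64 / 54) * 1.006 / 1000 ~= 688.6, which the ceil
+ * rounds up to the 689 PBN checked by the self-test below.
+ */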
+
+static int test_calc_pbn_mode(void)
+{
+	int ret;
+	ret = drm_dp_calc_pbn_mode(154000, 30);
+	if (ret != 689)
+		return -EINVAL;
+	ret = drm_dp_calc_pbn_mode(234000, 30);
+	if (ret != 1047)
+		return -EINVAL;
+	return 0;
+}
+
+/* we want to kick the TX after we've acked the up/down IRQs. */
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
+{
+	queue_work(system_long_wq, &mgr->tx_work);
+}
+
+static void drm_dp_mst_dump_mstb(struct seq_file *m,
+				 struct drm_dp_mst_branch *mstb)
+{
+	struct drm_dp_mst_port *port;
+	int tabs = mstb->lct;
+	char prefix[10];
+	int i;
+
+	for (i = 0; i < tabs; i++)
+		prefix[i] = '\t';
+	prefix[i] = '\0';
+
+	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
+	list_for_each_entry(port, &mstb->ports, next) {
+		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
+		if (port->mstb)
+			drm_dp_mst_dump_mstb(m, port->mstb);
+	}
+}
+
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+				  char *buf)
+{
+	int ret;
+	int i;
+	for (i = 0; i < 4; i++) {
+		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
+		if (ret != 16)
+			break;
+	}
+	if (i == 4)
+		return true;
+	return false;
+}
+
+/**
+ * drm_dp_mst_dump_topology() - dump topology to seq file.
+ * @m: seq_file to dump output to
+ * @mgr: manager to dump current topology for.
+ *
+ * helper to dump MST topology to a seq file for debugfs.
+ */
+void drm_dp_mst_dump_topology(struct seq_file *m,
+			      struct drm_dp_mst_topology_mgr *mgr)
+{
+	int i;
+	struct drm_dp_mst_port *port;
+	mutex_lock(&mgr->lock);
+	if (mgr->mst_primary)
+		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
+
+	/* dump VCPIs */
+	mutex_unlock(&mgr->lock);
+
+	mutex_lock(&mgr->payload_lock);
+	seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
+
+	for (i = 0; i < mgr->max_payloads; i++) {
+		if (mgr->proposed_vcpis[i]) {
+			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
+		} else {
+			seq_printf(m, "vcpi %d: unused\n", i);
+		}
+	}
+	for (i = 0; i < mgr->max_payloads; i++) {
+		seq_printf(m, "payload %d: %d, %d, %d\n",
+			   i,
+			   mgr->payloads[i].payload_state,
+			   mgr->payloads[i].start_slot,
+			   mgr->payloads[i].num_slots);
+	}
+	mutex_unlock(&mgr->payload_lock);
+
+	mutex_lock(&mgr->lock);
+	if (mgr->mst_primary) {
+		u8 buf[64];
+		bool bret;
+		int ret;
+		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+		seq_printf(m, "dpcd: ");
+		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
+			seq_printf(m, "%02x ", buf[i]);
+		seq_printf(m, "\n");
+		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+		seq_printf(m, "faux/mst: ");
+		for (i = 0; i < 2; i++)
+			seq_printf(m, "%02x ", buf[i]);
+		seq_printf(m, "\n");
+		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+		seq_printf(m, "mst ctrl: ");
+		for (i = 0; i < 1; i++)
+			seq_printf(m, "%02x ", buf[i]);
+		seq_printf(m, "\n");
+
+		bret = dump_dp_payload_table(mgr, buf);
+		if (bret) {
+			seq_printf(m, "payload table: ");
+			for (i = 0; i < 63; i++)
+				seq_printf(m, "%02x ", buf[i]);
+			seq_printf(m, "\n");
+		}
+
+	}
+
+	mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_dump_topology);
+
+static void drm_dp_tx_work(struct work_struct *work)
+{
+	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
+
+	mutex_lock(&mgr->qlock);
+	if (mgr->tx_down_in_progress)
+		process_single_down_tx_qlock(mgr);
+	mutex_unlock(&mgr->qlock);
+}
+
+/**
+ * drm_dp_mst_topology_mgr_init() - initialise a topology manager
+ * @mgr: manager struct to initialise
+ * @dev: device providing this structure - for i2c addition.
+ * @aux: DP helper aux channel to talk to this device
+ * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
+ * @max_payloads: maximum number of payloads this GPU can source
+ * @conn_base_id: the connector object ID the MST device is connected to.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+				 struct device *dev, struct drm_dp_aux *aux,
+				 int max_dpcd_transaction_bytes,
+				 int max_payloads, int conn_base_id)
+{
+	mutex_init(&mgr->lock);
+	mutex_init(&mgr->qlock);
+	mutex_init(&mgr->payload_lock);
+	INIT_LIST_HEAD(&mgr->tx_msg_upq);
+	INIT_LIST_HEAD(&mgr->tx_msg_downq);
+	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
+	init_waitqueue_head(&mgr->tx_waitq);
+	mgr->dev = dev;
+	mgr->aux = aux;
+	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
+	mgr->max_payloads = max_payloads;
+	mgr->conn_base_id = conn_base_id;
+	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
+	if (!mgr->payloads)
+		return -ENOMEM;
+	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
+	if (!mgr->proposed_vcpis)
+		return -ENOMEM;
+	set_bit(0, &mgr->payload_mask);
+	test_calc_pbn_mode();
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
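+
+/*
+ * Usage sketch (illustrative, not built): one-time setup at driver load;
+ * the transaction and payload limits shown are hypothetical hardware
+ * values.
+ */
+#if 0
+static int example_mst_init(struct drm_device *dev, struct drm_dp_aux *aux,
+			    struct drm_dp_mst_topology_mgr *mgr,
+			    int conn_base_id)
+{
+	/* 16 byte DPCD transactions, up to 4 simultaneous payload streams */
+	return drm_dp_mst_topology_mgr_init(mgr, dev->dev, aux, 16, 4,
+					    conn_base_id);
+}
+#endif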
+
+/**
+ * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
+ * @mgr: manager to destroy
+ */
+void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
+{
+	mutex_lock(&mgr->payload_lock);
+	kfree(mgr->payloads);
+	mgr->payloads = NULL;
+	kfree(mgr->proposed_vcpis);
+	mgr->proposed_vcpis = NULL;
+	mutex_unlock(&mgr->payload_lock);
+	mgr->dev = NULL;
+	mgr->aux = NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
+
+/* I2C device */
+static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+			       int num)
+{
+	struct drm_dp_aux *aux = adapter->algo_data;
+	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
+	struct drm_dp_mst_branch *mstb;
+	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+	unsigned int i;
+	bool reading = false;
+	struct drm_dp_sideband_msg_req_body msg;
+	struct drm_dp_sideband_msg_tx *txmsg = NULL;
+	int ret;
+
+	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	if (!mstb)
+		return -EREMOTEIO;
+
+	/* construct i2c msg */
+	/* see if last msg is a read */
+	if (msgs[num - 1].flags & I2C_M_RD)
+		reading = true;
+
+	if (!reading) {
+		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	msg.req_type = DP_REMOTE_I2C_READ;
+	msg.u.i2c_read.num_transactions = num - 1;
+	msg.u.i2c_read.port_number = port->port_num;
+	for (i = 0; i < num - 1; i++) {
+		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
+		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
+		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
+	}
+	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
+	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	txmsg->dst = mstb;
+	drm_dp_encode_sideband_req(&msg, txmsg);
+
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret > 0) {
+
+		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
+			ret = -EREMOTEIO;
+			goto out;
+		}
+		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
+			ret = -EIO;
+			goto out;
+		}
+		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
+		ret = num;
+	}
+out:
+	kfree(txmsg);
+	drm_dp_put_mst_branch_device(mstb);
+	return ret;
+}
+
+static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+	       I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
+	.functionality = drm_dp_mst_i2c_functionality,
+	.master_xfer = drm_dp_mst_i2c_xfer,
+};
+
+/**
+ * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
+{
+	aux->ddc.algo = &drm_dp_mst_i2c_algo;
+	aux->ddc.algo_data = aux;
+	aux->ddc.retries = 3;
+
+	aux->ddc.class = I2C_CLASS_DDC;
+	aux->ddc.owner = THIS_MODULE;
+	aux->ddc.dev.parent = aux->dev;
+	aux->ddc.dev.of_node = aux->dev->of_node;
+
+	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+		sizeof(aux->ddc.name));
+
+	return i2c_add_adapter(&aux->ddc);
+}
+
+/**
+ * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
+ * @aux: DisplayPort AUX channel
+ */
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
+{
+	i2c_del_adapter(&aux->ddc);
+}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8218078..0cc1827 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -233,7 +233,7 @@
 /**
  * Copy an IOCTL return string to user space
  */
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
 {
 	int len;
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index dfa9769..1dbf3bc 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3305,6 +3305,7 @@
 	struct drm_device *dev = encoder->dev;
 
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 		if (connector->encoder == encoder && connector->eld[0])
@@ -3775,8 +3776,14 @@
 
 	frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 
-	/* Populate picture aspect ratio from CEA mode list */
-	if (frame->video_code > 0)
+	/*
+	 * Populate picture aspect ratio from either
+	 * user input (if specified) or from the CEA mode list.
+	 */
+	if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
+		mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
+		frame->picture_aspect = mode->picture_aspect_ratio;
+	else if (frame->video_code > 0)
 		frame->picture_aspect = drm_get_cea_aspect_ratio(
 						frame->video_code);
 
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index f27c883..cc0ae04 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -327,7 +327,7 @@
 	return ret;
 }
 
-static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
 	.fb_probe = drm_fbdev_cma_create,
 };
 
@@ -354,9 +354,10 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
 	helper = &fbdev_cma->fb_helper;
 
+	drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+
 	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
 	if (ret < 0) {
 		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d5d8cea..3144db9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -49,10 +49,11 @@
  * helper functions used by many drivers to implement the kernel mode setting
  * interfaces.
  *
- * Initialization is done as a three-step process with drm_fb_helper_init(),
- * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
- * Drivers with fancier requirements than the default behaviour can override the
- * second step with their own code.  Teardown is done with drm_fb_helper_fini().
+ * Initialization is done as a four-step process with drm_fb_helper_prepare(),
+ * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and
+ * drm_fb_helper_initial_config(). Drivers with fancier requirements than the
+ * default behaviour can override the third step with their own code.
+ * Teardown is done with drm_fb_helper_fini().
  *
  * At runtime drivers should restore the fbdev console by calling
  * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
@@ -63,6 +64,19 @@
  *
  * All other functions exported by the fb helper library can be used to
  * implement the fbdev driver interface by the driver.
+ *
+ * It is possible, though perhaps somewhat tricky, to implement race-free
+ * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
+ * helper must be called first to initialize the minimum required to make
+ * hotplug detection work. Drivers also need to make sure to properly set up
+ * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init()
+ * it is safe to enable interrupts and start processing hotplug events. At the
+ * same time, drivers should initialize all modeset objects such as CRTCs,
+ * encoders and connectors. To finish up the fbdev helper initialization, the
+ * drm_fb_helper_init() function is called. To probe for all attached displays
+ * and set up an initial configuration using the detected hardware, drivers
+ * should call drm_fb_helper_single_add_all_connectors() followed by
+ * drm_fb_helper_initial_config().
  */
 
 /**
@@ -105,6 +119,58 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
 
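+/**
+ * drm_fb_helper_add_one_connector - add a connector to the fbdev helper
+ * @fb_helper: fbdev helper to add the connector to
+ * @connector: connector to add
+ *
+ * Grows the helper's connector array if needed and appends @connector.
+ * The caller must hold dev->mode_config.mutex. Returns 0 on success or a
+ * negative error code.
+ */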
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
+{
+	struct drm_fb_helper_connector **temp;
+	struct drm_fb_helper_connector *fb_helper_connector;
+
+	WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+	if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
+		temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL);
+		if (!temp)
+			return -ENOMEM;
+
+		fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
+		fb_helper->connector_info = temp;
+	}
+
+	fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+	if (!fb_helper_connector)
+		return -ENOMEM;
+
+	fb_helper_connector->connector = connector;
+	fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+
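+/**
+ * drm_fb_helper_remove_one_connector - remove a connector from the fbdev helper
+ * @fb_helper: fbdev helper to remove the connector from
+ * @connector: connector to remove
+ *
+ * Returns -EINVAL if @connector was never added; the caller must hold
+ * dev->mode_config.mutex.
+ */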
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+				       struct drm_connector *connector)
+{
+	struct drm_fb_helper_connector *fb_helper_connector;
+	int i, j;
+
+	WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		if (fb_helper->connector_info[i]->connector == connector)
+			break;
+	}
+
+	if (i == fb_helper->connector_count)
+		return -EINVAL;
+	fb_helper_connector = fb_helper->connector_info[i];
+
+	for (j = i + 1; j < fb_helper->connector_count; j++)
+		fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
+	fb_helper->connector_count--;
+	kfree(fb_helper_connector);
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
+
 static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
 {
 	struct drm_fb_helper_connector *fb_helper_conn;
@@ -199,9 +265,6 @@
 	struct drm_crtc_helper_funcs *funcs;
 	int i;
 
-	if (list_empty(&kernel_fb_helper_list))
-		return false;
-
 	list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
 		for (i = 0; i < helper->crtc_count; i++) {
 			struct drm_mode_set *mode_set =
@@ -531,6 +594,24 @@
 }
 
 /**
+ * drm_fb_helper_prepare - setup a drm_fb_helper structure
+ * @dev: DRM device
+ * @helper: driver-allocated fbdev helper structure to set up
+ * @funcs: pointer to structure of functions associated with this helper
+ *
+ * Sets up the bare minimum to make the framebuffer helper usable. This is
+ * useful to implement race-free initialization of the polling helpers.
+ */
+void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+			   const struct drm_fb_helper_funcs *funcs)
+{
+	INIT_LIST_HEAD(&helper->kernel_fb_list);
+	helper->funcs = funcs;
+	helper->dev = dev;
+}
+EXPORT_SYMBOL(drm_fb_helper_prepare);
+
+/**
  * drm_fb_helper_init - initialize a drm_fb_helper structure
  * @dev: drm device
  * @fb_helper: driver-allocated fbdev helper structure to initialize
@@ -542,8 +623,7 @@
  * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
 * to allow driver writers more control over the exact init sequence.
  *
- * Drivers must set fb_helper->funcs before calling
- * drm_fb_helper_initial_config().
+ * Drivers must call drm_fb_helper_prepare() before calling this function.
  *
  * RETURNS:
  * Zero if everything went ok, nonzero otherwise.
@@ -558,10 +638,6 @@
 	if (!max_conn_count)
 		return -EINVAL;
 
-	fb_helper->dev = dev;
-
-	INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
-
 	fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
 	if (!fb_helper->crtc_info)
 		return -ENOMEM;
@@ -572,6 +648,7 @@
 		kfree(fb_helper->crtc_info);
 		return -ENOMEM;
 	}
+	fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
 	fb_helper->connector_count = 0;
 
 	for (i = 0; i < crtc_count; i++) {
@@ -1056,7 +1133,6 @@
 	info->fix.ypanstep = 1; /* doing it in hw */
 	info->fix.ywrapstep = 0;
 	info->fix.accel = FB_ACCEL_NONE;
-	info->fix.type_aux = 0;
 
 	info->fix.line_length = pitch;
 	return;
@@ -1613,8 +1689,10 @@
  * either the output polling work or a work item launched from the driver's
  * hotplug interrupt).
  *
- * Note that the driver must ensure that this is only called _after_ the fb has
- * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ * Note that drivers may call this even before calling
+ * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
+ * for a race-free fbcon setup and makes sure that the fbdev emulation does
+ * not miss any hotplug events.
  *
  * RETURNS:
  * 0 on success and a non-zero error code otherwise.
@@ -1624,11 +1702,8 @@
 	struct drm_device *dev = fb_helper->dev;
 	u32 max_width, max_height;
 
-	if (!fb_helper->fb)
-		return 0;
-
 	mutex_lock(&fb_helper->dev->mode_config.mutex);
-	if (!drm_fb_helper_is_bound(fb_helper)) {
+	if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
 		fb_helper->delayed_hotplug = true;
 		mutex_unlock(&fb_helper->dev->mode_config.mutex);
 		return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 021fe5d..bc583fe 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -157,10 +157,6 @@
  */
 static int drm_cpu_valid(void)
 {
-#if defined(__i386__)
-	if (boot_cpu_data.x86 == 3)
-		return 0;	/* No cmpxchg on a 386 */
-#endif
 #if defined(__sparc__) && !defined(__sparc_v9__)
 	return 0;		/* No cmpxchg before v9 sparc. */
 #endif
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d7119..6adee4c 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
  * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
  */
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct inode *inode;
 	struct address_space *mapping;
 	struct page *p, **pages;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
-	inode = file_inode(obj->filp);
-	mapping = inode->i_mapping;
+	mapping = file_inode(obj->filp)->i_mapping;
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	gfpmask |= mapping_gfp_mask(mapping);
-
 	for (i = 0; i < npages; i++) {
-		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
 			goto fail;
 		pages[i] = p;
@@ -479,7 +490,7 @@
 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
 		 * so shmem can relocate pages during swapin if required.
 		 */
-		BUG_ON((gfpmask & __GFP_DMA32) &&
+		BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
 				(page_to_pfn(p) >= 0x00100000UL));
 	}
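
A minimal sketch of the new contract (hypothetical driver code): a driver
with zone constraints sets the GFP mask on the shmem mapping once, after
object initialization, and then simply calls drm_gem_get_pages():

	/* constrain backing pages to the DMA32 zone */
	mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
			     GFP_USER | __GFP_DMA32);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	/* ... use the pinned page array ... */
	drm_gem_put_pages(obj, pages, false, false);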
 
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 05c97c5..e467e67 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -327,7 +327,7 @@
 	/* Create a CMA GEM buffer. */
 	cma_obj = __drm_gem_cma_create(dev, size);
 	if (IS_ERR(cma_obj))
-		return ERR_PTR(PTR_ERR(cma_obj));
+		return ERR_CAST(cma_obj);
 
 	cma_obj->paddr = sg_dma_address(sgt->sgl);
 	cma_obj->sgt = sgt;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 86feedd..ecaf0fa 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -132,7 +132,7 @@
 				   i,
 				   dma->bufs[i].buf_size,
 				   dma->bufs[i].buf_count,
-				   atomic_read(&dma->bufs[i].freelist.count),
+				   0,
 				   dma->bufs[i].seg_count,
 				   seg_pages,
 				   seg_pages * PAGE_SIZE / 1024);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 69c61f3..ad66f96 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -342,8 +342,6 @@
 		file_priv->stereo_allowed = req->value;
 		break;
 	case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
-		if (!drm_universal_planes)
-			return -EINVAL;
 		if (req->value > 1)
 			return -EINVAL;
 		file_priv->universal_planes = req->value;
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
new file mode 100644
index 0000000..16150a0
--- /dev/null
+++ b/drivers/gpu/drm/drm_of.c
@@ -0,0 +1,67 @@
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/of_graph.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_of.h>
+
+/**
+ * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * @dev: DRM device
+ * @port: port OF node
+ *
+ * Given a port OF node, return the possible_crtcs bitmask bit of the
+ * corresponding CRTC within the device's list of CRTCs.  Returns zero if
+ * not found.
+ */
+static uint32_t drm_crtc_port_mask(struct drm_device *dev,
+				   struct device_node *port)
+{
+	unsigned int index = 0;
+	struct drm_crtc *tmp;
+
+	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+		if (tmp->port == port)
+			return 1 << index;
+
+		index++;
+	}
+
+	return 0;
+}
+
+/**
+ * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
+ * @dev: DRM device
+ * @port: encoder port to scan for endpoints
+ *
+ * Scan all endpoints attached to a port, locate their attached CRTCs,
+ * and generate the DRM mask of CRTCs which may be attached to this
+ * encoder.
+ *
+ * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ */
+uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+				    struct device_node *port)
+{
+	struct device_node *remote_port, *ep = NULL;
+	uint32_t possible_crtcs = 0;
+
+	do {
+		ep = of_graph_get_next_endpoint(port, ep);
+		if (!ep)
+			break;
+
+		remote_port = of_graph_get_remote_port(ep);
+		if (!remote_port) {
+			of_node_put(ep);
+			return 0;
+		}
+
+		possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+
+		of_node_put(remote_port);
+	} while (1);
+
+	return possible_crtcs;
+}
+EXPORT_SYMBOL(drm_of_find_possible_crtcs);
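
Typical use is in an encoder's bind/probe path (sketch; "port" is the
encoder's OF port node, which the driver looks up from its own DT node):

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port);
	/* fall back to the first CRTC if the graph yields nothing */
	if (!encoder->possible_crtcs)
		encoder->possible_crtcs = 1;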
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 6d13314..827ec1a 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -335,9 +335,10 @@
 	}
 
 	/* possible_crtcs will be filled in later by crtc_init */
-	ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs,
-			     formats, num_formats,
-			     DRM_PLANE_TYPE_PRIMARY);
+	ret = drm_universal_plane_init(dev, primary, 0,
+				       &drm_primary_helper_funcs,
+				       formats, num_formats,
+				       DRM_PLANE_TYPE_PRIMARY);
 	if (ret) {
 		kfree(primary);
 		primary = NULL;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d22676b..db7d250 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -130,7 +130,14 @@
 	count = drm_load_edid_firmware(connector);
 	if (count == 0)
 #endif
-		count = (*connector_funcs->get_modes)(connector);
+	{
+		if (connector->override_edid) {
+			struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
+
+			count = drm_add_edid_modes(connector, edid);
+		} else {
+			count = (*connector_funcs->get_modes)(connector);
+		}
+	}
 
 	if (count == 0 && connector->status == connector_status_connected)
 		count = drm_add_modes_noedid(connector, 1024, 768);
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 7047ca0..631f5af 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -293,3 +293,143 @@
 		DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
 }
 EXPORT_SYMBOL(drm_rect_debug_print);
+
+/**
+ * drm_rect_rotate - Rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation to be applied
+ *
+ * Apply @rotation to the coordinates of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the untransformed coordinate
+ * space.
+ */
+void drm_rect_rotate(struct drm_rect *r,
+		     int width, int height,
+		     unsigned int rotation)
+{
+	struct drm_rect tmp;
+
+	if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+		tmp = *r;
+
+		if (rotation & BIT(DRM_REFLECT_X)) {
+			r->x1 = width - tmp.x2;
+			r->x2 = width - tmp.x1;
+		}
+
+		if (rotation & BIT(DRM_REFLECT_Y)) {
+			r->y1 = height - tmp.y2;
+			r->y2 = height - tmp.y1;
+		}
+	}
+
+	switch (rotation & 0xf) {
+	case BIT(DRM_ROTATE_0):
+		break;
+	case BIT(DRM_ROTATE_90):
+		tmp = *r;
+		r->x1 = tmp.y1;
+		r->x2 = tmp.y2;
+		r->y1 = width - tmp.x2;
+		r->y2 = width - tmp.x1;
+		break;
+	case BIT(DRM_ROTATE_180):
+		tmp = *r;
+		r->x1 = width - tmp.x2;
+		r->x2 = width - tmp.x1;
+		r->y1 = height - tmp.y2;
+		r->y2 = height - tmp.y1;
+		break;
+	case BIT(DRM_ROTATE_270):
+		tmp = *r;
+		r->x1 = height - tmp.y2;
+		r->x2 = height - tmp.y1;
+		r->y1 = tmp.x1;
+		r->y2 = tmp.x2;
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL(drm_rect_rotate);
+
+/**
+ * drm_rect_rotate_inv - Inverse rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation whose inverse is to be applied
+ *
+ * Apply the inverse of @rotation to the coordinates
+ * of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the original untransformed
+ * coordinate space, so that you never have to flip
+ * them when doing a rotation and its inverse.
+ * That is, if you do:
+ *
+ * drm_rect_rotate(&r, width, height, rotation);
+ * drm_rect_rotate_inv(&r, width, height, rotation);
+ *
+ * you will always get back the original rectangle.
+ */
+void drm_rect_rotate_inv(struct drm_rect *r,
+			 int width, int height,
+			 unsigned int rotation)
+{
+	struct drm_rect tmp;
+
+	switch (rotation & 0xf) {
+	case BIT(DRM_ROTATE_0):
+		break;
+	case BIT(DRM_ROTATE_90):
+		tmp = *r;
+		r->x1 = width - tmp.y2;
+		r->x2 = width - tmp.y1;
+		r->y1 = tmp.x1;
+		r->y2 = tmp.x2;
+		break;
+	case BIT(DRM_ROTATE_180):
+		tmp = *r;
+		r->x1 = width - tmp.x2;
+		r->x2 = width - tmp.x1;
+		r->y1 = height - tmp.y2;
+		r->y2 = height - tmp.y1;
+		break;
+	case BIT(DRM_ROTATE_270):
+		tmp = *r;
+		r->x1 = tmp.y1;
+		r->x2 = tmp.y2;
+		r->y1 = height - tmp.x2;
+		r->y2 = height - tmp.x1;
+		break;
+	default:
+		break;
+	}
+
+	if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+		tmp = *r;
+
+		if (rotation & BIT(DRM_REFLECT_X)) {
+			r->x1 = width - tmp.x2;
+			r->x2 = width - tmp.x1;
+		}
+
+		if (rotation & BIT(DRM_REFLECT_Y)) {
+			r->y1 = height - tmp.y2;
+			r->y2 = height - tmp.y1;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_rect_rotate_inv);
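
The round-trip property can be illustrated with a short sketch (values are
arbitrary):

	struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 220 };

	drm_rect_rotate(&r, 1920, 1080, BIT(DRM_ROTATE_90));
	/* r is now expressed in the 90-degree-rotated coordinate space */
	drm_rect_rotate_inv(&r, 1920, 1080, BIT(DRM_ROTATE_90));
	/* r is back to { 10, 20, 110, 220 } */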
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 14d1646..233ea20 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -37,18 +37,9 @@
 unsigned int drm_debug = 0;	/* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
-unsigned int drm_rnodes = 0;	/* 1 to enable experimental render nodes API */
-EXPORT_SYMBOL(drm_rnodes);
-
-/* 1 to allow user space to request universal planes (experimental) */
-unsigned int drm_universal_planes = 0;
-EXPORT_SYMBOL(drm_universal_planes);
-
 unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
-EXPORT_SYMBOL(drm_vblank_offdelay);
 
 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
-EXPORT_SYMBOL(drm_timestamp_precision);
 
 /*
  * Default to use monotonic timestamps for wait-for-vblank and page-flip
@@ -60,14 +51,11 @@
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
 module_param_named(debug, drm_debug, int, 0600);
-module_param_named(rnodes, drm_rnodes, int, 0600);
-module_param_named(universal_planes, drm_universal_planes, int, 0600);
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
@@ -588,7 +576,7 @@
 			goto err_minors;
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
 		if (ret)
 			goto err_minors;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 369b262..7827dad 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -438,7 +438,6 @@
 out:
 	return ret;
 }
-EXPORT_SYMBOL(drm_sysfs_connector_add);
 
 /**
 * drm_sysfs_connector_remove - remove a connector device from sysfs
@@ -468,7 +467,6 @@
 	device_unregister(connector->kdev);
 	connector->kdev = NULL;
 }
-EXPORT_SYMBOL(drm_sysfs_connector_remove);
 
 /**
  * drm_sysfs_hotplug_event - generate a DRM uevent
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 178d2a9..9ba1aae 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -28,6 +28,7 @@
 	bool "Exynos DRM FIMD"
 	depends on DRM_EXYNOS && !FB_S3C
 	select FB_MODE_HELPERS
+	select MFD_SYSCON
 	help
 	  Choose this option if you want to use Exynos FIMD for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index a8ffc8c..31c3de9 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1018,7 +1018,7 @@
 	}
 
 	drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
 	return 0;
@@ -1376,6 +1376,7 @@
 	{ .compatible = "samsung,exynos5-dp" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, exynos_dp_match);
 
 struct platform_driver dp_driver = {
 	.probe		= exynos_dp_probe,
@@ -1390,4 +1391,4 @@
 
 MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
 MODULE_DESCRIPTION("Samsung SoC DP Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 9a16dbe..ba9b3d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -117,20 +117,7 @@
 	struct drm_device *dev = connector->dev;
 	struct exynos_drm_connector *exynos_connector =
 					to_exynos_connector(connector);
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
-	obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
-				   DRM_MODE_OBJECT_ENCODER);
-	if (!obj) {
-		DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
-				exynos_connector->encoder_id);
-		return NULL;
-	}
-
-	encoder = obj_to_encoder(obj);
-
-	return encoder;
+	return drm_encoder_find(dev, exynos_connector->encoder_id);
 }
 
 static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -185,7 +172,7 @@
 	struct exynos_drm_connector *exynos_connector =
 		to_exynos_connector(connector);
 
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(exynos_connector);
 }
@@ -230,7 +217,7 @@
 	drm_connector_init(dev, connector, &exynos_connector_funcs, type);
 	drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
 
-	err = drm_sysfs_connector_add(connector);
+	err = drm_connector_register(connector);
 	if (err)
 		goto err_connector;
 
@@ -250,7 +237,7 @@
 	return connector;
 
 err_sysfs:
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 err_connector:
 	drm_connector_cleanup(connector);
 	kfree(exynos_connector);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 95c9435..b68e58f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -69,8 +69,10 @@
 
 	if (mode > DRM_MODE_DPMS_ON) {
 		/* wait for the completion of page flip. */
-		wait_event(exynos_crtc->pending_flip_queue,
-				atomic_read(&exynos_crtc->pending_flip) == 0);
+		if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
+				!atomic_read(&exynos_crtc->pending_flip),
+				HZ/20))
+			atomic_set(&exynos_crtc->pending_flip, 0);
 		drm_vblank_off(crtc->dev, exynos_crtc->pipe);
 	}
 
@@ -259,6 +261,7 @@
 			spin_lock_irq(&dev->event_lock);
 			drm_vblank_put(dev, exynos_crtc->pipe);
 			list_del(&event->base.link);
+			atomic_set(&exynos_crtc->pending_flip, 0);
 			spin_unlock_irq(&dev->event_lock);
 
 			goto out;
@@ -508,3 +511,11 @@
 
 	return -EPERM;
 }
+
+void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
+{
+	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+	if (manager->ops->te_handler)
+		manager->ops->te_handler(manager);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 9f74b10..690dcdd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -36,4 +36,11 @@
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
 					unsigned int out_type);
 
+/*
+ * This function calls the CRTC device (manager)'s te_handler() callback
+ * to trigger the transfer of the video image at the tearing effect (TE)
+ * synchronization signal.
+ */
+void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
+
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 9e530f2..3aa1c7e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -48,7 +48,7 @@
 
 static void exynos_dpi_connector_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 }
 
@@ -117,7 +117,7 @@
 	}
 
 	drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
 	return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ab7d182..0d74e9b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -39,8 +39,6 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-#define VBLANK_OFF_DELAY	50000
-
 static struct platform_device *exynos_drm_pdev;
 
 static DEFINE_MUTEX(drm_component_lock);
@@ -103,8 +101,6 @@
 	/* setup possible_clones. */
 	exynos_drm_encoder_setup(dev);
 
-	drm_vblank_offdelay = VBLANK_OFF_DELAY;
-
 	platform_set_drvdata(dev->platformdev, dev);
 
 	/* Try to bind all sub drivers. */
@@ -362,7 +358,7 @@
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	pm_message_t message;
 
-	if (pm_runtime_suspended(dev))
+	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;
 
 	message.event = PM_EVENT_SUSPEND;
@@ -373,7 +369,7 @@
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
-	if (pm_runtime_suspended(dev))
+	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;
 
 	return exynos_drm_resume(drm_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 06cde45..69a6fa3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -40,8 +40,6 @@
 struct exynos_drm_overlay;
 struct drm_connector;
 
-extern unsigned int drm_vblank_offdelay;
-
 /* This enumerates device type. */
 enum exynos_drm_device_type {
 	EXYNOS_DEVICE_TYPE_NONE,
@@ -188,6 +186,8 @@
  * @win_commit: apply hardware specific overlay data to registers.
  * @win_enable: enable hardware specific overlay.
  * @win_disable: disable hardware specific overlay.
+ * @te_handler: trigger the transfer of the video image at the tearing
+ *	effect (TE) synchronization signal if there is a page flip request.
  */
 struct exynos_drm_manager;
 struct exynos_drm_manager_ops {
@@ -206,6 +206,7 @@
 	void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
 	void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
 	void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
+	void (*te_handler)(struct exynos_drm_manager *mgr);
 };
 
 /*
@@ -236,14 +237,9 @@
 	struct list_head	userptr_list;
 };
 
-struct exynos_drm_ipp_private {
-	struct device	*dev;
-	struct list_head	event_list;
-};
-
 struct drm_exynos_file_private {
 	struct exynos_drm_g2d_private	*g2d_priv;
-	struct exynos_drm_ipp_private	*ipp_priv;
+	struct device			*ipp_dev;
 	struct file			*anon_filp;
 };
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6302aa6..86aebd8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -16,7 +16,10 @@
 #include <drm/drm_panel.h>
 
 #include <linux/clk.h>
+#include <linux/gpio/consumer.h>
 #include <linux/irq.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
 #include <linux/phy/phy.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
@@ -24,6 +27,7 @@
 #include <video/mipi_display.h>
 #include <video/videomode.h>
 
+#include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 
 /* returns true iff both arguments logically differ */
@@ -54,9 +58,12 @@
 
 /* FIFO memory AC characteristic register */
 #define DSIM_PLLCTRL_REG	0x4c	/* PLL control register */
-#define DSIM_PLLTMR_REG		0x50	/* PLL timer register */
 #define DSIM_PHYACCHR_REG	0x54	/* D-PHY AC characteristic register */
 #define DSIM_PHYACCHR1_REG	0x58	/* D-PHY AC characteristic register1 */
+#define DSIM_PHYCTRL_REG	0x5c
+#define DSIM_PHYTIMING_REG	0x64
+#define DSIM_PHYTIMING1_REG	0x68
+#define DSIM_PHYTIMING2_REG	0x6c
 
 /* DSIM_STATUS */
 #define DSIM_STOP_STATE_DAT(x)		(((x) & 0xf) << 0)
@@ -200,6 +207,24 @@
 #define DSIM_PLL_M(x)			((x) << 4)
 #define DSIM_PLL_S(x)			((x) << 1)
 
+/* DSIM_PHYCTRL */
+#define DSIM_PHYCTRL_ULPS_EXIT(x)	(((x) & 0x1ff) << 0)
+
+/* DSIM_PHYTIMING */
+#define DSIM_PHYTIMING_LPX(x)		((x) << 8)
+#define DSIM_PHYTIMING_HS_EXIT(x)	((x) << 0)
+
+/* DSIM_PHYTIMING1 */
+#define DSIM_PHYTIMING1_CLK_PREPARE(x)	((x) << 24)
+#define DSIM_PHYTIMING1_CLK_ZERO(x)	((x) << 16)
+#define DSIM_PHYTIMING1_CLK_POST(x)	((x) << 8)
+#define DSIM_PHYTIMING1_CLK_TRAIL(x)	((x) << 0)
+
+/* DSIM_PHYTIMING2 */
+#define DSIM_PHYTIMING2_HS_PREPARE(x)	((x) << 16)
+#define DSIM_PHYTIMING2_HS_ZERO(x)	((x) << 8)
+#define DSIM_PHYTIMING2_HS_TRAIL(x)	((x) << 0)
+
 #define DSI_MAX_BUS_WIDTH		4
 #define DSI_NUM_VIRTUAL_CHANNELS	4
 #define DSI_TX_FIFO_SIZE		2048
@@ -233,6 +258,12 @@
 #define DSIM_STATE_INITIALIZED		BIT(1)
 #define DSIM_STATE_CMD_LPM		BIT(2)
 
+struct exynos_dsi_driver_data {
+	unsigned int plltmr_reg;
+
+	unsigned int has_freqband:1;
+};
+
 struct exynos_dsi {
 	struct mipi_dsi_host dsi_host;
 	struct drm_connector connector;
@@ -247,6 +278,7 @@
 	struct clk *bus_clk;
 	struct regulator_bulk_data supplies[2];
 	int irq;
+	int te_gpio;
 
 	u32 pll_clk_rate;
 	u32 burst_clk_rate;
@@ -262,11 +294,39 @@
 
 	spinlock_t transfer_lock; /* protects transfer_list */
 	struct list_head transfer_list;
+
+	struct exynos_dsi_driver_data *driver_data;
 };
 
 #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
 #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
 
+static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
+	.plltmr_reg = 0x50,
+	.has_freqband = 1,
+};
+
+static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
+	.plltmr_reg = 0x58,
+};
+
+static struct of_device_id exynos_dsi_of_match[] = {
+	{ .compatible = "samsung,exynos4210-mipi-dsi",
+	  .data = &exynos4_dsi_driver_data },
+	{ .compatible = "samsung,exynos5410-mipi-dsi",
+	  .data = &exynos5_dsi_driver_data },
+	{ }
+};
+
+static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data(
+						struct platform_device *pdev)
+{
+	const struct of_device_id *of_id =
+			of_match_device(exynos_dsi_of_match, &pdev->dev);
+
+	return (struct exynos_dsi_driver_data *)of_id->data;
+}
+
 static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
 {
 	if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
@@ -340,14 +400,9 @@
 static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
 					unsigned long freq)
 {
-	static const unsigned long freq_bands[] = {
-		100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
-		270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
-		510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
-		770 * MHZ, 870 * MHZ, 950 * MHZ,
-	};
+	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 	unsigned long fin, fout;
-	int timeout, band;
+	int timeout;
 	u8 p, s;
 	u16 m;
 	u32 reg;
@@ -368,18 +423,30 @@
 			"failed to find PLL PMS for requested frequency\n");
 		return -EFAULT;
 	}
+	dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
 
-	for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
-		if (fout < freq_bands[band])
-			break;
+	writel(500, dsi->reg_base + driver_data->plltmr_reg);
 
-	dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout,
-		p, m, s, band);
+	reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
 
-	writel(500, dsi->reg_base + DSIM_PLLTMR_REG);
+	if (driver_data->has_freqband) {
+		static const unsigned long freq_bands[] = {
+			100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+			270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+			510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+			770 * MHZ, 870 * MHZ, 950 * MHZ,
+		};
+		int band;
 
-	reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
-			| DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
+		for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+			if (fout < freq_bands[band])
+				break;
+
+		dev_dbg(dsi->dev, "band %d\n", band);
+
+		reg |= DSIM_FREQ_BAND(band);
+	}
+
 	writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
 
 	timeout = 1000;
@@ -433,6 +500,59 @@
 	return 0;
 }
 
+static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
+{
+	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	u32 reg;
+
+	if (driver_data->has_freqband)
+		return;
+
+	/* B D-PHY: D-PHY Master & Slave Analog Block control */
+	reg = DSIM_PHYCTRL_ULPS_EXIT(0x0af);
+	writel(reg, dsi->reg_base + DSIM_PHYCTRL_REG);
+
+	/*
+	 * T LPX: Transmitted length of any Low-Power state period
+	 * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
+	 *	burst
+	 */
+	reg = DSIM_PHYTIMING_LPX(0x06) | DSIM_PHYTIMING_HS_EXIT(0x0b);
+	writel(reg, dsi->reg_base + DSIM_PHYTIMING_REG);
+
+	/*
+	 * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
+	 *	Line state immediately before the HS-0 Line state starting the
+	 *	HS transmission
+	 * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
+	 *	transmitting the Clock.
+	 * T CLK-POST: Time that the transmitter continues to send HS clock
+	 *	after the last associated Data Lane has transitioned to LP mode.
+	 *	The interval is defined as the period from the end of T HS-TRAIL
+	 *	to the beginning of T CLK-TRAIL
+	 * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
+	 *	the last payload clock bit of a HS transmission burst
+	 */
+	reg = DSIM_PHYTIMING1_CLK_PREPARE(0x07) |
+			DSIM_PHYTIMING1_CLK_ZERO(0x27) |
+			DSIM_PHYTIMING1_CLK_POST(0x0d) |
+			DSIM_PHYTIMING1_CLK_TRAIL(0x08);
+	writel(reg, dsi->reg_base + DSIM_PHYTIMING1_REG);
+
+	/*
+	 * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
+	 *	Line state immediately before the HS-0 Line state starting the
+	 *	HS transmission
+	 * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
+	 *	transmitting the Sync sequence.
+	 * T HS-TRAIL: Time that the transmitter drives the flipped differential
+	 *	state after last payload data bit of a HS transmission burst
+	 */
+	reg = DSIM_PHYTIMING2_HS_PREPARE(0x09) | DSIM_PHYTIMING2_HS_ZERO(0x0d) |
+			DSIM_PHYTIMING2_HS_TRAIL(0x0b);
+	writel(reg, dsi->reg_base + DSIM_PHYTIMING2_REG);
+}
+
 static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
 {
 	u32 reg;
@@ -468,13 +588,20 @@
 	/* DSI configuration */
 	reg = 0;
 
+	/*
+	 * The first bit of mode_flags specifies the display configuration.
+	 * If this bit is set (MIPI_DSI_MODE_VIDEO), the DSI link operates in
+	 * video mode; otherwise it operates in command mode.
+	 */
 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
 		reg |= DSIM_VIDEO_MODE;
 
+		/*
+		 * The user manual states that the following bits are ignored
+		 * in command mode.
+		 */
 		if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
 			reg |= DSIM_MFLUSH_VS;
-		if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
-			reg |= DSIM_EOT_DISABLE;
 		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
 			reg |= DSIM_SYNC_INFORM;
 		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -491,6 +618,9 @@
 			reg |= DSIM_HSA_MODE;
 	}
 
+	if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+		reg |= DSIM_EOT_DISABLE;
+
 	switch (dsi->format) {
 	case MIPI_DSI_FMT_RGB888:
 		reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
@@ -944,17 +1074,90 @@
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
+{
+	struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
+	struct drm_encoder *encoder = dsi->encoder;
+
+	if (dsi->state & DSIM_STATE_ENABLED)
+		exynos_drm_crtc_te_handler(encoder->crtc);
+
+	return IRQ_HANDLED;
+}
+
+static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
+{
+	enable_irq(dsi->irq);
+
+	if (gpio_is_valid(dsi->te_gpio))
+		enable_irq(gpio_to_irq(dsi->te_gpio));
+}
+
+static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
+{
+	if (gpio_is_valid(dsi->te_gpio))
+		disable_irq(gpio_to_irq(dsi->te_gpio));
+
+	disable_irq(dsi->irq);
+}
+
 static int exynos_dsi_init(struct exynos_dsi *dsi)
 {
-	exynos_dsi_enable_clock(dsi);
 	exynos_dsi_reset(dsi);
-	enable_irq(dsi->irq);
+	exynos_dsi_enable_irq(dsi);
+	exynos_dsi_enable_clock(dsi);
 	exynos_dsi_wait_for_reset(dsi);
+	exynos_dsi_set_phy_ctrl(dsi);
 	exynos_dsi_init_link(dsi);
 
 	return 0;
 }
 
+static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
+{
+	int ret;
+
+	dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+	if (!gpio_is_valid(dsi->te_gpio)) {
+		dev_err(dsi->dev, "no te-gpios specified\n");
+		ret = dsi->te_gpio;
+		goto out;
+	}
+
+	ret = gpio_request_one(dsi->te_gpio, GPIOF_IN, "te_gpio");
+	if (ret) {
+		dev_err(dsi->dev, "gpio request failed with %d\n", ret);
+		goto out;
+	}
+
+	/*
+	 * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because the panel
+	 * calls drm_panel_init() before mipi_dsi_attach() in its probe().
+	 * That means te_gpio is still invalid when exynos_dsi_enable_irq() is
+	 * called from drm_panel_init(), i.e. before the panel is attached.
+	 */
+	ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
+					exynos_dsi_te_irq_handler, NULL,
+					IRQF_TRIGGER_RISING, "TE", dsi);
+	if (ret) {
+		dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
+		gpio_free(dsi->te_gpio);
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
+{
+	if (gpio_is_valid(dsi->te_gpio)) {
+		free_irq(gpio_to_irq(dsi->te_gpio), dsi);
+		gpio_free(dsi->te_gpio);
+		dsi->te_gpio = -ENOENT;
+	}
+}
+
 static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
 				  struct mipi_dsi_device *device)
 {
@@ -968,6 +1171,19 @@
 	if (dsi->connector.dev)
 		drm_helper_hpd_irq_event(dsi->connector.dev);
 
+	/*
+	 * This is a temporary solution and should be replaced with a more
+	 * generic mechanism.
+	 *
+	 * If the attached panel device operates in command mode, the DSI
+	 * driver has to register a TE interrupt handler.
+	 */
+	if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+		int ret = exynos_dsi_register_te_irq(dsi);
+
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -976,6 +1192,8 @@
 {
 	struct exynos_dsi *dsi = host_to_dsi(host);
 
+	exynos_dsi_unregister_te_irq(dsi);
+
 	dsi->panel_node = NULL;
 
 	if (dsi->connector.dev)
@@ -1089,7 +1307,7 @@
 
 		exynos_dsi_disable_clock(dsi);
 
-		disable_irq(dsi->irq);
+		exynos_dsi_disable_irq(dsi);
 	}
 
 	dsi->state &= ~DSIM_STATE_CMD_LPM;
@@ -1246,7 +1464,7 @@
 	}
 
 	drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
 	return 0;
@@ -1278,6 +1496,7 @@
 	.type = EXYNOS_DISPLAY_TYPE_LCD,
 	.ops = &exynos_dsi_display_ops,
 };
+MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
 
 /* of_* functions will be removed after merge of of_graph patches */
 static struct device_node *
@@ -1435,6 +1654,9 @@
 		goto err_del_component;
 	}
 
+	/* Mark te_gpio as invalid until the panel attach provides one */
+	dsi->te_gpio = -ENOENT;
+
 	init_completion(&dsi->completed);
 	spin_lock_init(&dsi->transfer_lock);
 	INIT_LIST_HEAD(&dsi->transfer_list);
@@ -1443,6 +1665,7 @@
 	dsi->dsi_host.dev = &pdev->dev;
 
 	dsi->dev = &pdev->dev;
+	dsi->driver_data = exynos_dsi_get_driver_data(pdev);
 
 	ret = exynos_dsi_parse_dt(dsi);
 	if (ret)
@@ -1525,11 +1748,6 @@
 	return 0;
 }
 
-static struct of_device_id exynos_dsi_of_match[] = {
-	{ .compatible = "samsung,exynos4210-mipi-dsi" },
-	{ }
-};
-
 struct platform_driver dsi_driver = {
 	.probe = exynos_dsi_probe,
 	.remove = exynos_dsi_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index d771b46..32e63f6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -225,7 +225,7 @@
 	return ret;
 }
 
-static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
 	.fb_probe =	exynos_drm_fbdev_create,
 };
 
@@ -266,7 +266,8 @@
 		return -ENOMEM;
 
 	private->fb_helper = helper = &fbdev->drm_fb_helper;
-	helper->funcs = &exynos_drm_fb_helper_funcs;
+
+	drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
 
 	num_crtc = dev->mode_config.num_crtc;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 831dde9..ec7cc9e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1887,6 +1887,7 @@
 	{ .compatible = "samsung,exynos4212-fimc" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, fimc_of_match);
 
 struct platform_driver fimc_driver = {
 	.probe		= fimc_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 33161ad..5d09e33 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -20,6 +20,8 @@
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #include <video/of_display_timing.h>
 #include <video/of_videomode.h>
@@ -61,6 +63,24 @@
 /* color key value register for hardware window 1 ~ 4. */
 #define WKEYCON1_BASE(x)		((WKEYCON1 + 0x140) + ((x - 1) * 8))
 
+/* I80 / RGB trigger control register */
+#define TRIGCON				0x1A4
+#define TRGMODE_I80_RGB_ENABLE_I80	(1 << 0)
+#define SWTRGCMD_I80_RGB_ENABLE		(1 << 1)
+
+/* display mode change control register except exynos4 */
+#define VIDOUT_CON			0x000
+#define VIDOUT_CON_F_I80_LDI0		(0x2 << 8)
+
+/* I80 interface control for main LDI register */
+#define I80IFCONFAx(x)			(0x1B0 + (x) * 4)
+#define I80IFCONFBx(x)			(0x1B8 + (x) * 4)
+#define LCD_CS_SETUP(x)			((x) << 16)
+#define LCD_WR_SETUP(x)			((x) << 12)
+#define LCD_WR_ACTIVE(x)		((x) << 8)
+#define LCD_WR_HOLD(x)			((x) << 4)
+#define I80IFEN_ENABLE			(1 << 0)
+
 /* FIMD has totally five hardware windows. */
 #define WINDOWS_NR	5
 
@@ -68,10 +88,14 @@
 
 struct fimd_driver_data {
 	unsigned int timing_base;
+	unsigned int lcdblk_offset;
+	unsigned int lcdblk_vt_shift;
+	unsigned int lcdblk_bypass_shift;
 
 	unsigned int has_shadowcon:1;
 	unsigned int has_clksel:1;
 	unsigned int has_limited_fmt:1;
+	unsigned int has_vidoutcon:1;
 };
 
 static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -82,12 +106,19 @@
 
 static struct fimd_driver_data exynos4_fimd_driver_data = {
 	.timing_base = 0x0,
+	.lcdblk_offset = 0x210,
+	.lcdblk_vt_shift = 10,
+	.lcdblk_bypass_shift = 1,
 	.has_shadowcon = 1,
 };
 
 static struct fimd_driver_data exynos5_fimd_driver_data = {
 	.timing_base = 0x20000,
+	.lcdblk_offset = 0x214,
+	.lcdblk_vt_shift = 24,
+	.lcdblk_bypass_shift = 15,
 	.has_shadowcon = 1,
+	.has_vidoutcon = 1,
 };
 
 struct fimd_win_data {
@@ -112,15 +143,22 @@
 	struct clk			*bus_clk;
 	struct clk			*lcd_clk;
 	void __iomem			*regs;
+	struct regmap			*sysreg;
 	struct drm_display_mode		mode;
 	struct fimd_win_data		win_data[WINDOWS_NR];
 	unsigned int			default_win;
 	unsigned long			irq_flags;
+	u32				vidcon0;
 	u32				vidcon1;
+	u32				vidout_con;
+	u32				i80ifcon;
+	bool				i80_if;
 	bool				suspended;
 	int				pipe;
 	wait_queue_head_t		wait_vsync_queue;
 	atomic_t			wait_vsync_event;
+	atomic_t			win_updated;
+	atomic_t			triggering;
 
 	struct exynos_drm_panel_info panel;
 	struct fimd_driver_data *driver_data;
@@ -136,6 +174,7 @@
 	  .data = &exynos5_fimd_driver_data },
 	{},
 };
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
 
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 	struct platform_device *pdev)
@@ -243,6 +282,14 @@
 	unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
 	u32 clkdiv;
 
+	if (ctx->i80_if) {
+		/*
+		 * The frame done interrupt has to occur before the next
+		 * TE signal.
+		 */
+		ideal_clk *= 2;
+	}
+
 	/* Find the clock divider value that gets us closest to ideal_clk */
 	clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
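
To put illustrative numbers on the doubling (not taken from the patch): a
panel with 1440x823 total timings at 60 Hz needs

	ideal_clk = htotal * vtotal * vrefresh
	          = 1440 * 823 * 60 ~= 71.1 MHz

so with an i80 interface the driver aims for roughly 2 * 71.1 ~= 142.2 MHz,
giving the frame done interrupt enough headroom to arrive before the next
TE signal.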
 
@@ -271,11 +318,10 @@
 {
 	struct fimd_context *ctx = mgr->ctx;
 	struct drm_display_mode *mode = &ctx->mode;
-	struct fimd_driver_data *driver_data;
-	u32 val, clkdiv, vidcon1;
-	int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+	struct fimd_driver_data *driver_data = ctx->driver_data;
+	void *timing_base = ctx->regs + driver_data->timing_base;
+	u32 val, clkdiv;
 
-	driver_data = ctx->driver_data;
 	if (ctx->suspended)
 		return;
 
@@ -283,33 +329,65 @@
 	if (mode->htotal == 0 || mode->vtotal == 0)
 		return;
 
-	/* setup polarity values */
-	vidcon1 = ctx->vidcon1;
-	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-		vidcon1 |= VIDCON1_INV_VSYNC;
-	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-		vidcon1 |= VIDCON1_INV_HSYNC;
-	writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
+	if (ctx->i80_if) {
+		val = ctx->i80ifcon | I80IFEN_ENABLE;
+		writel(val, timing_base + I80IFCONFAx(0));
 
-	/* setup vertical timing values. */
-	vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
-	vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
-	vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
+		/* disable auto frame rate */
+		writel(0, timing_base + I80IFCONFBx(0));
 
-	val = VIDTCON0_VBPD(vbpd - 1) |
-		VIDTCON0_VFPD(vfpd - 1) |
-		VIDTCON0_VSPW(vsync_len - 1);
-	writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
+		/* set video type selection to I80 interface */
+		if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+					driver_data->lcdblk_offset,
+					0x3 << driver_data->lcdblk_vt_shift,
+					0x1 << driver_data->lcdblk_vt_shift)) {
+			DRM_ERROR("Failed to update sysreg for I80 i/f.\n");
+			return;
+		}
+	} else {
+		int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+		u32 vidcon1;
 
-	/* setup horizontal timing values.  */
-	hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
-	hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
-	hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
+		/* setup polarity values */
+		vidcon1 = ctx->vidcon1;
+		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+			vidcon1 |= VIDCON1_INV_VSYNC;
+		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+			vidcon1 |= VIDCON1_INV_HSYNC;
+		writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
 
-	val = VIDTCON1_HBPD(hbpd - 1) |
-		VIDTCON1_HFPD(hfpd - 1) |
-		VIDTCON1_HSPW(hsync_len - 1);
-	writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+		/* setup vertical timing values. */
+		vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+		vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
+		vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
+		val = VIDTCON0_VBPD(vbpd - 1) |
+			VIDTCON0_VFPD(vfpd - 1) |
+			VIDTCON0_VSPW(vsync_len - 1);
+		writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
+
+		/* setup horizontal timing values.  */
+		hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+		hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
+		hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+		val = VIDTCON1_HBPD(hbpd - 1) |
+			VIDTCON1_HFPD(hfpd - 1) |
+			VIDTCON1_HSPW(hsync_len - 1);
+		writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+	}
+
+	if (driver_data->has_vidoutcon)
+		writel(ctx->vidout_con, timing_base + VIDOUT_CON);
+
+	/* set bypass selection */
+	if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+				driver_data->lcdblk_offset,
+				0x1 << driver_data->lcdblk_bypass_shift,
+				0x1 << driver_data->lcdblk_bypass_shift)) {
+		DRM_ERROR("Failed to update sysreg for bypass setting.\n");
+		return;
+	}
 
 	/* setup horizontal and vertical display size. */
 	val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
@@ -322,7 +400,8 @@
 	 * fields of register with prefix '_F' would be updated
 	 * at vsync(same as dma start)
 	 */
-	val = VIDCON0_ENVID | VIDCON0_ENVID_F;
+	val = ctx->vidcon0;
+	val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
 
 	if (ctx->driver_data->has_clksel)
 		val |= VIDCON0_CLKSEL_LCD;
@@ -660,6 +739,9 @@
 	}
 
 	win_data->enabled = true;
+
+	if (ctx->i80_if)
+		atomic_set(&ctx->win_updated, 1);
 }
 
 static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
@@ -838,6 +920,58 @@
 	}
 }
 
+static void fimd_trigger(struct device *dev)
+{
+	struct exynos_drm_manager *mgr = get_fimd_manager(dev);
+	struct fimd_context *ctx = mgr->ctx;
+	struct fimd_driver_data *driver_data = ctx->driver_data;
+	void *timing_base = ctx->regs + driver_data->timing_base;
+	u32 reg;
+
+	atomic_set(&ctx->triggering, 1);
+
+	reg = readl(ctx->regs + VIDINTCON0);
+	reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE |
+						VIDINTCON0_INT_SYSMAINCON);
+	writel(reg, ctx->regs + VIDINTCON0);
+
+	reg = readl(timing_base + TRIGCON);
+	reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
+	writel(reg, timing_base + TRIGCON);
+}
+
+static void fimd_te_handler(struct exynos_drm_manager *mgr)
+{
+	struct fimd_context *ctx = mgr->ctx;
+
+	/* Check whether the CRTC has already been detached from the encoder */
+	if (ctx->pipe < 0 || !ctx->drm_dev)
+		return;
+
+	/*
+	 * Skip triggering if we are already in the triggering state, since
+	 * multiple trigger requests can cause a panel reset.
+	 */
+	if (atomic_read(&ctx->triggering))
+		return;
+
+	/*
+	 * If there is a page flip request, trigger the transfer and handle the
+	 * page flip event so that the current fb can be written to panel GRAM.
+	 */
+	if (atomic_add_unless(&ctx->win_updated, -1, 0))
+		fimd_trigger(ctx->dev);
+
+	/* Wake up the vsync event queue */
+	if (atomic_read(&ctx->wait_vsync_event)) {
+		atomic_set(&ctx->wait_vsync_event, 0);
+		wake_up(&ctx->wait_vsync_queue);
+
+		if (!atomic_read(&ctx->triggering))
+			drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+	}
+}
+
 static struct exynos_drm_manager_ops fimd_manager_ops = {
 	.dpms = fimd_dpms,
 	.mode_fixup = fimd_mode_fixup,
@@ -849,6 +983,7 @@
 	.win_mode_set = fimd_win_mode_set,
 	.win_commit = fimd_win_commit,
 	.win_disable = fimd_win_disable,
+	.te_handler = fimd_te_handler,
 };
 
 static struct exynos_drm_manager fimd_manager = {
@@ -859,26 +994,40 @@
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 {
 	struct fimd_context *ctx = (struct fimd_context *)dev_id;
-	u32 val;
+	u32 val, clear_bit;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
-	if (val & VIDINTCON1_INT_FRAME)
-		/* VSYNC interrupt */
-		writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+	clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
+	if (val & clear_bit)
+		writel(clear_bit, ctx->regs + VIDINTCON1);
 
 	/* check whether the crtc has already been detached from the encoder */
 	if (ctx->pipe < 0 || !ctx->drm_dev)
 		goto out;
 
-	drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-	exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	if (ctx->i80_if) {
+		/* unset I80 frame done interrupt */
+		val = readl(ctx->regs + VIDINTCON0);
+		val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
+		writel(val, ctx->regs + VIDINTCON0);
 
-	/* set wait vsync event to zero and wake up queue. */
-	if (atomic_read(&ctx->wait_vsync_event)) {
-		atomic_set(&ctx->wait_vsync_event, 0);
-		wake_up(&ctx->wait_vsync_queue);
+		/* exit triggering mode */
+		atomic_set(&ctx->triggering, 0);
+
+		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	} else {
+		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+		/* set wait vsync event to zero and wake up queue. */
+		if (atomic_read(&ctx->wait_vsync_event)) {
+			atomic_set(&ctx->wait_vsync_event, 0);
+			wake_up(&ctx->wait_vsync_queue);
+		}
 	}
+
 out:
 	return IRQ_HANDLED;
 }
@@ -923,6 +1072,7 @@
 {
 	struct device *dev = &pdev->dev;
 	struct fimd_context *ctx;
+	struct device_node *i80_if_timings;
 	struct resource *res;
 	int ret = -EINVAL;
 
@@ -944,12 +1094,51 @@
 
 	ctx->dev = dev;
 	ctx->suspended = true;
+	ctx->driver_data = drm_fimd_get_driver_data(pdev);
 
 	if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
 		ctx->vidcon1 |= VIDCON1_INV_VDEN;
 	if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
 		ctx->vidcon1 |= VIDCON1_INV_VCLK;
 
+	i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
+	if (i80_if_timings) {
+		u32 val;
+
+		ctx->i80_if = true;
+
+		if (ctx->driver_data->has_vidoutcon)
+			ctx->vidout_con |= VIDOUT_CON_F_I80_LDI0;
+		else
+			ctx->vidcon0 |= VIDCON0_VIDOUT_I80_LDI0;
+		/*
+		 * The user manual states that this "DSI_EN" bit is required
+		 * to enable the I80 24-bit data interface.
+		 */
+		ctx->vidcon0 |= VIDCON0_DSI_EN;
+
+		if (of_property_read_u32(i80_if_timings, "cs-setup", &val))
+			val = 0;
+		ctx->i80ifcon = LCD_CS_SETUP(val);
+		if (of_property_read_u32(i80_if_timings, "wr-setup", &val))
+			val = 0;
+		ctx->i80ifcon |= LCD_WR_SETUP(val);
+		if (of_property_read_u32(i80_if_timings, "wr-active", &val))
+			val = 1;
+		ctx->i80ifcon |= LCD_WR_ACTIVE(val);
+		if (of_property_read_u32(i80_if_timings, "wr-hold", &val))
+			val = 0;
+		ctx->i80ifcon |= LCD_WR_HOLD(val);
+	}
+	of_node_put(i80_if_timings);
+
+	ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+							"samsung,sysreg");
+	if (IS_ERR(ctx->sysreg)) {
+		dev_warn(dev, "failed to get system register.\n");
+		ctx->sysreg = NULL;
+	}
+
 	ctx->bus_clk = devm_clk_get(dev, "fimd");
 	if (IS_ERR(ctx->bus_clk)) {
 		dev_err(dev, "failed to get bus clock\n");
@@ -972,7 +1161,8 @@
 		goto err_del_component;
 	}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					   ctx->i80_if ? "lcd_sys" : "vsync");
 	if (!res) {
 		dev_err(dev, "irq request failed.\n");
 		ret = -ENXIO;
@@ -986,7 +1176,6 @@
 		goto err_del_component;
 	}
 
-	ctx->driver_data = drm_fimd_get_driver_data(pdev);
 	init_waitqueue_head(&ctx->wait_vsync_queue);
 	atomic_set(&ctx->wait_vsync_event, 0);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 8001587..df7a77d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1042,8 +1042,23 @@
 int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
 			     struct drm_file *file)
 {
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct device *dev;
+	struct g2d_data *g2d;
 	struct drm_exynos_g2d_get_ver *ver = data;
 
+	if (!g2d_priv)
+		return -ENODEV;
+
+	dev = g2d_priv->dev;
+	if (!dev)
+		return -ENODEV;
+
+	g2d = dev_get_drvdata(dev);
+	if (!g2d)
+		return -EFAULT;
+
 	ver->major = G2D_HW_MAJOR_VER;
 	ver->minor = G2D_HW_MINOR_VER;
 
@@ -1056,7 +1071,7 @@
 {
 	struct drm_exynos_file_private *file_priv = file->driver_priv;
 	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-	struct device *dev = g2d_priv->dev;
+	struct device *dev;
 	struct g2d_data *g2d;
 	struct drm_exynos_g2d_set_cmdlist *req = data;
 	struct drm_exynos_g2d_cmd *cmd;
@@ -1067,6 +1082,10 @@
 	int size;
 	int ret;
 
+	if (!g2d_priv)
+		return -ENODEV;
+
+	dev = g2d_priv->dev;
 	if (!dev)
 		return -ENODEV;
 
@@ -1223,13 +1242,17 @@
 {
 	struct drm_exynos_file_private *file_priv = file->driver_priv;
 	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-	struct device *dev = g2d_priv->dev;
+	struct device *dev;
 	struct g2d_data *g2d;
 	struct drm_exynos_g2d_exec *req = data;
 	struct g2d_runqueue_node *runqueue_node;
 	struct list_head *run_cmdlist;
 	struct list_head *event_list;
 
+	if (!g2d_priv)
+		return -ENODEV;
+
+	dev = g2d_priv->dev;
 	if (!dev)
 		return -ENODEV;
 
@@ -1544,8 +1567,10 @@
 
 static const struct of_device_id exynos_g2d_match[] = {
 	{ .compatible = "samsung,exynos5250-g2d" },
+	{ .compatible = "samsung,exynos4212-g2d" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
 
 struct platform_driver g2d_driver = {
 	.probe		= g2d_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 163a054..15db801 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -301,7 +301,6 @@
 					unsigned int gem_handle,
 					struct drm_file *filp)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
 
 	obj = drm_gem_object_lookup(dev, filp, gem_handle);
@@ -310,8 +309,6 @@
 		return;
 	}
 
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
 	drm_gem_object_unreference_unlocked(obj);
 
 	/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index a1888e1..c411399 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -129,9 +129,6 @@
 
 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
 {
-	if (!ippdrv)
-		return -EINVAL;
-
 	mutex_lock(&exynos_drm_ippdrv_lock);
 	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
 	mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -141,9 +138,6 @@
 
 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
 {
-	if (!ippdrv)
-		return -EINVAL;
-
 	mutex_lock(&exynos_drm_ippdrv_lock);
 	list_del(&ippdrv->drv_list);
 	mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -151,20 +145,15 @@
 	return 0;
 }
 
-static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
-		u32 *idp)
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
 {
 	int ret;
 
-	/* do the allocation under our mutexlock */
 	mutex_lock(lock);
 	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
 	mutex_unlock(lock);
-	if (ret < 0)
-		return ret;
 
-	*idp = ret;
-	return 0;
+	return ret;
 }
 
 static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
@@ -178,35 +167,25 @@
 {
 	void *obj;
 
-	DRM_DEBUG_KMS("id[%d]\n", id);
-
 	mutex_lock(lock);
-
-	/* find object using handle */
 	obj = idr_find(id_idr, id);
-	if (!obj) {
-		DRM_ERROR("failed to find object.\n");
-		mutex_unlock(lock);
-		return ERR_PTR(-ENODEV);
-	}
-
 	mutex_unlock(lock);
 
 	return obj;
 }
 
-static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
-		enum drm_exynos_ipp_cmd	cmd)
+static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
+			    struct drm_exynos_ipp_property *property)
 {
-	/*
-	 * check dedicated flag and WB, OUTPUT operation with
-	 * power on state.
-	 */
-	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
-	    !pm_runtime_suspended(ippdrv->dev)))
-		return true;
+	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
+				  !pm_runtime_suspended(ippdrv->dev)))
+		return -EBUSY;
 
-	return false;
+	if (ippdrv->check_property &&
+	    ippdrv->check_property(ippdrv->dev, property))
+		return -EINVAL;
+
+	return 0;
 }
 
 static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
@@ -214,62 +193,30 @@
 {
 	struct exynos_drm_ippdrv *ippdrv;
 	u32 ipp_id = property->ipp_id;
-
-	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
+	int ret;
 
 	if (ipp_id) {
-		/* find ipp driver using idr */
-		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
-			ipp_id);
-		if (IS_ERR(ippdrv)) {
-			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
-			return ippdrv;
+		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
+		if (!ippdrv) {
+			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
+			return ERR_PTR(-ENODEV);
 		}
 
-		/*
-		 * WB, OUTPUT opertion not supported multi-operation.
-		 * so, make dedicated state at set property ioctl.
-		 * when ipp driver finished operations, clear dedicated flags.
-		 */
-		if (ipp_check_dedicated(ippdrv, property->cmd)) {
-			DRM_ERROR("already used choose device.\n");
-			return ERR_PTR(-EBUSY);
-		}
-
-		/*
-		 * This is necessary to find correct device in ipp drivers.
-		 * ipp drivers have different abilities,
-		 * so need to check property.
-		 */
-		if (ippdrv->check_property &&
-		    ippdrv->check_property(ippdrv->dev, property)) {
-			DRM_ERROR("not support property.\n");
-			return ERR_PTR(-EINVAL);
+		ret = ipp_check_driver(ippdrv, property);
+		if (ret < 0) {
+			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
+			return ERR_PTR(ret);
 		}
 
 		return ippdrv;
 	} else {
-		/*
-		 * This case is search all ipp driver for finding.
-		 * user application don't set ipp_id in this case,
-		 * so ipp subsystem search correct driver in driver list.
-		 */
 		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
-			if (ipp_check_dedicated(ippdrv, property->cmd)) {
-				DRM_DEBUG_KMS("used device.\n");
-				continue;
-			}
-
-			if (ippdrv->check_property &&
-			    ippdrv->check_property(ippdrv->dev, property)) {
-				DRM_DEBUG_KMS("not support property.\n");
-				continue;
-			}
-
-			return ippdrv;
+			ret = ipp_check_driver(ippdrv, property);
+			if (ret == 0)
+				return ippdrv;
 		}
 
-		DRM_ERROR("not support ipp driver operations.\n");
+		DRM_DEBUG("cannot find driver suitable for given property.\n");
 	}
 
 	return ERR_PTR(-ENODEV);
@@ -308,8 +255,7 @@
 		struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
-	struct device *dev = priv->dev;
+	struct device *dev = file_priv->ipp_dev;
 	struct ipp_context *ctx = get_ipp_context(dev);
 	struct drm_exynos_ipp_prop_list *prop_list = data;
 	struct exynos_drm_ippdrv *ippdrv;
@@ -346,10 +292,10 @@
 		 */
 		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
 						prop_list->ipp_id);
-		if (IS_ERR(ippdrv)) {
+		if (!ippdrv) {
 			DRM_ERROR("not found ipp%d driver.\n",
 					prop_list->ipp_id);
-			return PTR_ERR(ippdrv);
+			return -ENODEV;
 		}
 
 		*prop_list = ippdrv->prop_list;
@@ -432,7 +378,7 @@
 	if (!event_work)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+	INIT_WORK(&event_work->work, ipp_sched_event);
 
 	return event_work;
 }
@@ -441,8 +387,7 @@
 		struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
-	struct device *dev = priv->dev;
+	struct device *dev = file_priv->ipp_dev;
 	struct ipp_context *ctx = get_ipp_context(dev);
 	struct drm_exynos_ipp_property *property = data;
 	struct exynos_drm_ippdrv *ippdrv;
@@ -489,19 +434,18 @@
 	if (!c_node)
 		return -ENOMEM;
 
-	/* create property id */
-	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
-		&property->prop_id);
-	if (ret) {
+	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
+	if (ret < 0) {
 		DRM_ERROR("failed to create id.\n");
 		goto err_clear;
 	}
+	property->prop_id = ret;
 
 	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
 		property->prop_id, property->cmd, (int)ippdrv);
 
 	/* stored property information and ippdrv in private data */
-	c_node->priv = priv;
+	c_node->dev = dev;
 	c_node->property = *property;
 	c_node->state = IPP_STATE_IDLE;
 
@@ -534,7 +478,6 @@
 		INIT_LIST_HEAD(&c_node->mem_list[i]);
 
 	INIT_LIST_HEAD(&c_node->event_list);
-	list_splice_init(&priv->event_list, &c_node->event_list);
 	mutex_lock(&ippdrv->cmd_lock);
 	list_add_tail(&c_node->list, &ippdrv->cmd_list);
 	mutex_unlock(&ippdrv->cmd_lock);
@@ -577,42 +520,18 @@
 	kfree(c_node);
 }
 
-static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
 {
-	struct drm_exynos_ipp_property *property = &c_node->property;
-	struct drm_exynos_ipp_mem_node *m_node;
-	struct list_head *head;
-	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
-
-	for_each_ipp_ops(i) {
-		/* source/destination memory list */
-		head = &c_node->mem_list[i];
-
-		/* find memory node entry */
-		list_for_each_entry(m_node, head, list) {
-			DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
-				i ? "dst" : "src", count[i], (int)m_node);
-			count[i]++;
-		}
+	switch (c_node->property.cmd) {
+	case IPP_CMD_WB:
+		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
+	case IPP_CMD_OUTPUT:
+		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
+	case IPP_CMD_M2M:
+	default:
+		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
+		       !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
 	}
-
-	DRM_DEBUG_KMS("min[%d]max[%d]\n",
-		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
-		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
-
-	/*
-	 * M2M operations should be need paired memory address.
-	 * so, need to check minimum count about src, dst.
-	 * other case not use paired memory, so use maximum count
-	 */
-	if (ipp_is_m2m_cmd(property->cmd))
-		ret = min(count[EXYNOS_DRM_OPS_SRC],
-			count[EXYNOS_DRM_OPS_DST]);
-	else
-		ret = max(count[EXYNOS_DRM_OPS_SRC],
-			count[EXYNOS_DRM_OPS_DST]);
-
-	return ret;
 }
 
 static struct drm_exynos_ipp_mem_node
@@ -683,16 +602,14 @@
 		struct drm_exynos_ipp_queue_buf *qbuf)
 {
 	struct drm_exynos_ipp_mem_node *m_node;
-	struct drm_exynos_ipp_buf_info buf_info;
-	void *addr;
+	struct drm_exynos_ipp_buf_info *buf_info;
 	int i;
 
 	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
 	if (!m_node)
 		return ERR_PTR(-ENOMEM);
 
-	/* clear base address for error handling */
-	memset(&buf_info, 0x0, sizeof(buf_info));
+	buf_info = &m_node->buf_info;
 
 	/* operations, buffer id */
 	m_node->ops_id = qbuf->ops_id;
@@ -707,6 +624,8 @@
 
 		/* get dma address by handle */
 		if (qbuf->handle[i]) {
+			dma_addr_t *addr;
+
 			addr = exynos_drm_gem_get_dma_addr(drm_dev,
 					qbuf->handle[i], file);
 			if (IS_ERR(addr)) {
@@ -714,15 +633,14 @@
 				goto err_clear;
 			}
 
-			buf_info.handles[i] = qbuf->handle[i];
-			buf_info.base[i] = *(dma_addr_t *) addr;
-			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
-				i, buf_info.base[i], (int)buf_info.handles[i]);
+			buf_info->handles[i] = qbuf->handle[i];
+			buf_info->base[i] = *addr;
+			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
+				      buf_info->base[i], buf_info->handles[i]);
 		}
 	}
 
 	m_node->filp = file;
-	m_node->buf_info = buf_info;
 	mutex_lock(&c_node->mem_lock);
 	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
 	mutex_unlock(&c_node->mem_lock);
@@ -930,8 +848,7 @@
 		struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
-	struct device *dev = priv->dev;
+	struct device *dev = file_priv->ipp_dev;
 	struct ipp_context *ctx = get_ipp_context(dev);
 	struct drm_exynos_ipp_queue_buf *qbuf = data;
 	struct drm_exynos_ipp_cmd_node *c_node;
@@ -955,9 +872,9 @@
 	/* find command node */
 	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
 		qbuf->prop_id);
-	if (IS_ERR(c_node)) {
+	if (!c_node) {
 		DRM_ERROR("failed to get command node.\n");
-		return PTR_ERR(c_node);
+		return -ENODEV;
 	}
 
 	/* buffer control */
@@ -1062,9 +979,8 @@
 		struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
 	struct exynos_drm_ippdrv *ippdrv = NULL;
-	struct device *dev = priv->dev;
+	struct device *dev = file_priv->ipp_dev;
 	struct ipp_context *ctx = get_ipp_context(dev);
 	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
 	struct drm_exynos_ipp_cmd_work *cmd_work;
@@ -1091,9 +1007,9 @@
 
 	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
 		cmd_ctrl->prop_id);
-	if (IS_ERR(c_node)) {
+	if (!c_node) {
 		DRM_ERROR("invalid command node list.\n");
-		return PTR_ERR(c_node);
+		return -ENODEV;
 	}
 
 	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
@@ -1198,7 +1114,6 @@
 	/* reset h/w block */
 	if (ippdrv->reset &&
 	    ippdrv->reset(ippdrv->dev)) {
-		DRM_ERROR("failed to reset.\n");
 		return -EINVAL;
 	}
 
@@ -1216,30 +1131,24 @@
 		/* set format */
 		if (ops->set_fmt) {
 			ret = ops->set_fmt(ippdrv->dev, config->fmt);
-			if (ret) {
-				DRM_ERROR("not support format.\n");
+			if (ret)
 				return ret;
-			}
 		}
 
 		/* set transform for rotation, flip */
 		if (ops->set_transf) {
 			ret = ops->set_transf(ippdrv->dev, config->degree,
 				config->flip, &swap);
-			if (ret) {
-				DRM_ERROR("not support tranf.\n");
-				return -EINVAL;
-			}
+			if (ret)
+				return ret;
 		}
 
 		/* set size */
 		if (ops->set_size) {
 			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
 				&config->sz);
-			if (ret) {
-				DRM_ERROR("not support size.\n");
+			if (ret)
 				return ret;
-			}
 		}
 	}
 
@@ -1283,11 +1192,6 @@
 
 			m_node = list_first_entry(head,
 				struct drm_exynos_ipp_mem_node, list);
-			if (!m_node) {
-				DRM_ERROR("failed to get node.\n");
-				ret = -EFAULT;
-				goto err_unlock;
-			}
 
 			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
 
@@ -1545,11 +1449,6 @@
 
 			m_node = list_first_entry(head,
 				struct drm_exynos_ipp_mem_node, list);
-			if (!m_node) {
-				DRM_ERROR("empty memory node.\n");
-				ret = -ENOMEM;
-				goto err_mem_unlock;
-			}
 
 			tbuf_id[i] = m_node->buf_id;
 			DRM_DEBUG_KMS("%s buf_id[%d]\n",
@@ -1586,11 +1485,6 @@
 
 		m_node = list_first_entry(head,
 			struct drm_exynos_ipp_mem_node, list);
-		if (!m_node) {
-			DRM_ERROR("empty memory node.\n");
-			ret = -ENOMEM;
-			goto err_mem_unlock;
-		}
 
 		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
 
@@ -1704,21 +1598,17 @@
 
 	/* get ipp driver entry */
 	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
-		u32 ipp_id;
-
 		ippdrv->drm_dev = drm_dev;
 
-		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
-				    &ipp_id);
-		if (ret || ipp_id == 0) {
+		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
+		if (ret < 0) {
 			DRM_ERROR("failed to create id.\n");
 			goto err;
 		}
+		ippdrv->prop_list.ipp_id = ret;
 
 		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
-			count++, (int)ippdrv, ipp_id);
-
-		ippdrv->prop_list.ipp_id = ipp_id;
+			count++, (int)ippdrv, ret);
 
 		/* store parent device for node */
 		ippdrv->parent_dev = dev;
@@ -1776,17 +1666,10 @@
 		struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_ipp_private *priv;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	priv->dev = dev;
-	file_priv->ipp_priv = priv;
+	file_priv->ipp_dev = dev;
 
-	INIT_LIST_HEAD(&priv->event_list);
-
-	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);
+	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
 
 	return 0;
 }
@@ -1795,13 +1678,12 @@
 		struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
 	struct exynos_drm_ippdrv *ippdrv = NULL;
 	struct ipp_context *ctx = get_ipp_context(dev);
 	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
 	int count = 0;
 
-	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
+	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
 
 	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
 		mutex_lock(&ippdrv->cmd_lock);
@@ -1810,7 +1692,7 @@
 			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
 				count++, (int)ippdrv);
 
-			if (c_node->priv == priv) {
+			if (c_node->dev == file_priv->ipp_dev) {
 				/*
				 * userland went into an abnormal state: the process
				 * was killed and closed the file.
@@ -1832,7 +1714,6 @@
 		mutex_unlock(&ippdrv->cmd_lock);
 	}
 
-	kfree(priv);
 	return;
 }
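The ipp_create_id() rework above adopts the usual idr_alloc() calling convention: the new id (allocated from 1 upward) is the return value and negative values are errnos, so the old output parameter disappears. A minimal sketch of the pattern, reusing the driver's types but with illustrative function names:

	static int example_create_id(struct idr *idr, struct mutex *lock, void *obj)
	{
		int id;

		mutex_lock(lock);
		/* start at 1 so 0 can mean "no id"; end of 0 means no upper bound */
		id = idr_alloc(idr, obj, 1, 0, GFP_KERNEL);
		mutex_unlock(lock);

		return id;	/* >= 1 on success, negative errno on failure */
	}

	static int example_caller(struct ipp_context *ctx,
				  struct drm_exynos_ipp_cmd_node *c_node,
				  struct drm_exynos_ipp_property *property)
	{
		int ret = example_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);

		if (ret < 0)
			return ret;
		property->prop_id = ret;	/* the id doubles as the return value */
		return 0;
	}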
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 7aaeaae..6f48d62 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,7 @@
 /*
  * A structure of command node.
  *
- * @priv: IPP private information.
+ * @dev: IPP device.
  * @list: list head to command queue information.
  * @event_list: list head of event.
  * @mem_list: list head to source,destination memory queue information.
@@ -64,7 +64,7 @@
  * @state: state of command node.
  */
 struct drm_exynos_ipp_cmd_node {
-	struct exynos_drm_ipp_private *priv;
+	struct device		*dev;
 	struct list_head	list;
 	struct list_head	event_list;
 	struct list_head	mem_list[EXYNOS_DRM_OPS_MAX];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index f01fbb6..55af6b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -691,6 +691,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, exynos_rotator_match);
 
 static int rotator_probe(struct platform_device *pdev)
 {
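Adding MODULE_DEVICE_TABLE(of, ...) exports the OF match table into the module's alias information, so udev can autoload the driver when a matching device-tree node is present. A minimal sketch with illustrative names:

	#include <linux/module.h>
	#include <linux/of.h>

	static const struct of_device_id example_match[] = {
		{ .compatible = "vendor,example-block" },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, example_match);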
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 2fb8705..9528d81 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -562,7 +562,7 @@
 	}
 
 	drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
 	return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index aa259b0..562966d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -84,6 +84,7 @@
 	struct clk			*sclk_hdmiphy;
 	struct clk			*mout_hdmi;
 	struct regulator_bulk_data	*regul_bulk;
+	struct regulator		*reg_hdmi_en;
 	int				regul_count;
 };
 
@@ -592,6 +593,13 @@
 	.is_apb_phy	= 0,
 };
 
+static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
+	.type		= HDMI_TYPE13,
+	.phy_confs	= hdmiphy_v13_configs,
+	.phy_conf_count	= ARRAY_SIZE(hdmiphy_v13_configs),
+	.is_apb_phy	= 0,
+};
+
 static struct hdmi_driver_data exynos5_hdmi_driver_data = {
 	.type		= HDMI_TYPE14,
 	.phy_confs	= hdmiphy_v13_configs,
@@ -1129,7 +1137,7 @@
 	}
 
 	drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
 	return 0;
@@ -1241,14 +1249,13 @@
 
 static void hdmi_audio_init(struct hdmi_context *hdata)
 {
-	u32 sample_rate, bits_per_sample, frame_size_code;
+	u32 sample_rate, bits_per_sample;
 	u32 data_num, bit_ch, sample_frq;
 	u32 val;
 	u8 acr[7];
 
 	sample_rate = 44100;
 	bits_per_sample = 16;
-	frame_size_code = 0;
 
 	switch (bits_per_sample) {
 	case 20:
@@ -2168,7 +2175,6 @@
 	struct device *dev = hdata->dev;
 	struct hdmi_resources *res = &hdata->res;
 	static char *supply[] = {
-		"hdmi-en",
 		"vdd",
 		"vdd_osc",
 		"vdd_pll",
@@ -2228,6 +2234,20 @@
 	}
 	res->regul_count = ARRAY_SIZE(supply);
 
+	res->reg_hdmi_en = devm_regulator_get(dev, "hdmi-en");
+	if (IS_ERR(res->reg_hdmi_en) && PTR_ERR(res->reg_hdmi_en) != -ENOENT) {
+		DRM_ERROR("failed to get hdmi-en regulator\n");
+		return PTR_ERR(res->reg_hdmi_en);
+	}
+	if (!IS_ERR(res->reg_hdmi_en)) {
+		ret = regulator_enable(res->reg_hdmi_en);
+		if (ret) {
+			DRM_ERROR("failed to enable hdmi-en regulator\n");
+			return ret;
+		}
+	} else {
+		res->reg_hdmi_en = NULL;
+	}
+
 	return ret;
 fail:
 	DRM_ERROR("HDMI resource init - failed\n");
@@ -2263,6 +2283,9 @@
 		.compatible = "samsung,exynos5-hdmi",
 		.data = &exynos5_hdmi_driver_data,
 	}, {
+		.compatible = "samsung,exynos4210-hdmi",
+		.data = &exynos4210_hdmi_driver_data,
+	}, {
 		.compatible = "samsung,exynos4212-hdmi",
 		.data = &exynos4212_hdmi_driver_data,
 	}, {
@@ -2272,6 +2295,7 @@
 		/* end node */
 	}
 };
+MODULE_DEVICE_TABLE(of, hdmi_match_types);
 
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
@@ -2494,7 +2518,11 @@
 
 	cancel_delayed_work_sync(&hdata->hotplug_work);
 
-	put_device(&hdata->hdmiphy_port->dev);
+	if (hdata->res.reg_hdmi_en)
+		regulator_disable(hdata->res.reg_hdmi_en);
+
+	if (hdata->hdmiphy_port)
+		put_device(&hdata->hdmiphy_port->dev);
 	put_device(&hdata->ddc_adpt->dev);
 
 	pm_runtime_disable(&pdev->dev);
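The hdmi-en handling above treats the regulator as optional: -ENOENT just means the supply is not described in the device tree, while any other error aborts the probe. A sketch of that pattern, assuming the 3.16-era regulator API and with illustrative names:

	static int example_get_hdmi_en(struct device *dev, struct regulator **out)
	{
		struct regulator *reg;
		int ret;

		reg = devm_regulator_get(dev, "hdmi-en");
		if (IS_ERR(reg)) {
			if (PTR_ERR(reg) != -ENOENT)
				return PTR_ERR(reg);	/* real failure */
			*out = NULL;			/* supply simply absent */
			return 0;
		}

		ret = regulator_enable(reg);
		if (ret)
			return ret;

		*out = reg;
		return 0;
	}

The matching teardown then only calls regulator_disable() when the pointer is non-NULL, as the remove path above does.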
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7529946..e8b4ec8 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -76,7 +76,7 @@
 	struct clk		*vp;
 	struct clk		*sclk_mixer;
 	struct clk		*sclk_hdmi;
-	struct clk		*sclk_dac;
+	struct clk		*mout_mixer;
 };
 
 enum mixer_version_id {
@@ -93,6 +93,7 @@
 	bool			interlace;
 	bool			powered;
 	bool			vp_enabled;
+	bool			has_sclk;
 	u32			int_en;
 
 	struct mutex		mixer_mutex;
@@ -106,6 +107,7 @@
 struct mixer_drv_data {
 	enum mixer_version_id	version;
 	bool					is_vp_enabled;
+	bool					has_sclk;
 };
 
 static const u8 filter_y_horiz_tap8[] = {
@@ -363,6 +365,11 @@
 			vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
 			mixer_reg_writemask(res, MXR_CFG, val,
 				MXR_CFG_VP_ENABLE);
+
+			/* control blending of graphic layer 0 */
+			mixer_reg_writemask(res, MXR_GRAPHIC_CFG(0), val,
+					MXR_GRP_CFG_BLEND_PRE_MUL |
+					MXR_GRP_CFG_PIXEL_BLEND_EN);
 		}
 		break;
 	}
@@ -809,19 +816,23 @@
 		dev_err(dev, "failed to get clock 'vp'\n");
 		return -ENODEV;
 	}
-	mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
-	if (IS_ERR(mixer_res->sclk_mixer)) {
-		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
-		return -ENODEV;
-	}
-	mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
-	if (IS_ERR(mixer_res->sclk_dac)) {
-		dev_err(dev, "failed to get clock 'sclk_dac'\n");
-		return -ENODEV;
-	}
 
-	if (mixer_res->sclk_hdmi)
-		clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+	if (mixer_ctx->has_sclk) {
+		mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+		if (IS_ERR(mixer_res->sclk_mixer)) {
+			dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+			return -ENODEV;
+		}
+		mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer");
+		if (IS_ERR(mixer_res->mout_mixer)) {
+			dev_err(dev, "failed to get clock 'mout_mixer'\n");
+			return -ENODEV;
+		}
+
+		if (mixer_res->sclk_hdmi && mixer_res->mout_mixer)
+			clk_set_parent(mixer_res->mout_mixer,
+				       mixer_res->sclk_hdmi);
+	}
 
 	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
 	if (res == NULL) {
@@ -1082,7 +1093,8 @@
 	clk_prepare_enable(res->mixer);
 	if (ctx->vp_enabled) {
 		clk_prepare_enable(res->vp);
-		clk_prepare_enable(res->sclk_mixer);
+		if (ctx->has_sclk)
+			clk_prepare_enable(res->sclk_mixer);
 	}
 
 	mutex_lock(&ctx->mixer_mutex);
@@ -1121,7 +1133,8 @@
 	clk_disable_unprepare(res->mixer);
 	if (ctx->vp_enabled) {
 		clk_disable_unprepare(res->vp);
-		clk_disable_unprepare(res->sclk_mixer);
+		if (ctx->has_sclk)
+			clk_disable_unprepare(res->sclk_mixer);
 	}
 
 	pm_runtime_put_sync(ctx->dev);
@@ -1189,9 +1202,15 @@
 	.is_vp_enabled = 0,
 };
 
+static struct mixer_drv_data exynos4212_mxr_drv_data = {
+	.version = MXR_VER_0_0_0_16,
+	.is_vp_enabled = 1,
+};
+
 static struct mixer_drv_data exynos4210_mxr_drv_data = {
 	.version = MXR_VER_0_0_0_16,
 	.is_vp_enabled = 1,
+	.has_sclk = 1,
 };
 
 static struct platform_device_id mixer_driver_types[] = {
@@ -1208,6 +1227,12 @@
 
 static struct of_device_id mixer_match_types[] = {
 	{
+		.compatible = "samsung,exynos4210-mixer",
+		.data	= &exynos4210_mxr_drv_data,
+	}, {
+		.compatible = "samsung,exynos4212-mixer",
+		.data	= &exynos4212_mxr_drv_data,
+	}, {
 		.compatible = "samsung,exynos5-mixer",
 		.data	= &exynos5250_mxr_drv_data,
 	}, {
@@ -1220,6 +1245,7 @@
 		/* end node */
 	}
 };
+MODULE_DEVICE_TABLE(of, mixer_match_types);
 
 static int mixer_bind(struct device *dev, struct device *manager, void *data)
 {
@@ -1251,6 +1277,7 @@
 	ctx->pdev = pdev;
 	ctx->dev = dev;
 	ctx->vp_enabled = drv->is_vp_enabled;
+	ctx->has_sclk = drv->has_sclk;
 	ctx->mxr_ver = drv->version;
 	init_waitqueue_head(&ctx->wait_vsync_queue);
 	atomic_set(&ctx->wait_vsync_event, 0);
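The has_sclk flag above is per-variant driver data: each compatible entry in mixer_match_types carries a mixer_drv_data, and probe copies its flags into the context so the clock code can branch on them later. A sketch of that flow, with illustrative names:

	static int example_probe(struct platform_device *pdev)
	{
		const struct of_device_id *match;
		const struct mixer_drv_data *drv;
		struct mixer_context *ctx;

		ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		match = of_match_node(mixer_match_types, pdev->dev.of_node);
		if (!match)
			return -ENODEV;
		drv = match->data;

		/* copy variant flags so the rest of the driver can test them */
		ctx->vp_enabled = drv->is_vp_enabled;
		ctx->has_sclk = drv->has_sclk;

		return 0;
	}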
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index c18268c..248c33a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -192,7 +192,7 @@
 	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 
 	psb_intel_i2c_destroy(gma_encoder->ddc_bus);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -304,7 +304,7 @@
 	drm_connector_helper_add(connector,
 					&cdv_intel_crt_connector_helper_funcs);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	return;
 failed_ddc:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 9ff30c2..a4cc0e6 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1713,7 +1713,7 @@
 		}
 	}
 	i2c_del_adapter(&intel_dp->adapter);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -1847,7 +1847,7 @@
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	/* Set up the DDC bus. */
 	switch (output_reg) {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index b99084b..4268bf2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -248,7 +248,7 @@
 
 	if (gma_encoder->i2c_bus)
 		psb_intel_i2c_destroy(gma_encoder->i2c_bus);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -356,7 +356,7 @@
 
 	hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
 	hdmi_priv->dev = dev;
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	return;
 
 failed_ddc:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8ecc920..0b77039 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -446,7 +446,7 @@
 
 	if (gma_encoder->i2c_bus)
 		psb_intel_i2c_destroy(gma_encoder->i2c_bus);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -774,7 +774,7 @@
 
 out:
 	mutex_unlock(&dev->mode_config.mutex);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	return;
 
 failed_find:
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index e7fcc14..d0dd3be 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -561,7 +561,7 @@
 	return psbfb_create(psb_fbdev, sizes);
 }
 
-static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
 	.gamma_set = psbfb_gamma_set,
 	.gamma_get = psbfb_gamma_get,
 	.fb_probe = psbfb_probe,
@@ -600,7 +600,8 @@
 	}
 
 	dev_priv->fbdev = fbdev;
-	fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
 
 	drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
 							INTELFB_CONN_LIMIT);
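gma500 now uses drm_fb_helper_prepare() instead of assigning helper.funcs by hand, which also lets the funcs table become const. A sketch of the bring-up order under that (3.16-era) API, with illustrative callback names and counts:

	static const struct drm_fb_helper_funcs example_fb_funcs = {
		.fb_probe = example_fb_probe,	/* hypothetical callback */
	};

	int example_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper)
	{
		drm_fb_helper_prepare(dev, helper, &example_fb_funcs);
		return drm_fb_helper_init(dev, helper,
					  2 /* crtc count */, 2 /* max connectors */);
	}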
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 592d205..ce015db 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@
 
 	WARN_ON(gt->pages);
 
-	pages = drm_gem_get_pages(&gt->gem, 0);
+	pages = drm_gem_get_pages(&gt->gem);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 6e91b20..abf2248 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -318,7 +318,7 @@
 
 	if (!dsi_connector)
 		return;
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	sender = dsi_connector->pkg_sender;
 	mdfld_dsi_pkg_sender_destroy(sender);
@@ -597,7 +597,7 @@
 	dsi_config->encoder = encoder;
 	encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
 		INTEL_OUTPUT_MIPI2;
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	return;
 
 	/*TODO: add code to destroy outputs on error*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index cf018dd..e6f5c62 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -665,7 +665,7 @@
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	dev_info(dev->dev, "HDMI initialised.\n");
 
 	return;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 9b09946..0d39da6 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -404,7 +404,7 @@
 out:
 	mutex_unlock(&dev->mode_config.mutex);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	return;
 
 failed_find:
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index d7778d0..88aad95 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -563,7 +563,7 @@
 
 	if (lvds_priv->ddc_bus)
 		psb_intel_i2c_destroy(lvds_priv->ddc_bus);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -829,7 +829,7 @@
 	 */
 out:
 	mutex_unlock(&dev->mode_config.mutex);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	return;
 
 failed_find:
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index deeb082..0be96fd 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1682,7 +1682,7 @@
 				     psb_intel_sdvo_connector->tv_format);
 
 	psb_intel_sdvo_destroy_enhance_property(connector);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -2071,7 +2071,7 @@
 	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
 
 	gma_connector_attach_encoder(&connector->base, &encoder->base);
-	drm_sysfs_connector_add(&connector->base.base);
+	drm_connector_register(&connector->base.base);
 }
 
 static void
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index ac357b0..a8f1e03 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1196,8 +1196,7 @@
 	if (priv->hdmi->irq)
 		free_irq(priv->hdmi->irq, priv);
 
-	if (priv->cec)
-		i2c_unregister_device(priv->cec);
+	i2c_unregister_device(priv->cec);
 	drm_i2c_encoder_destroy(encoder);
 	kfree(priv);
 }
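The tda998x cleanup drops the NULL check because i2c_unregister_device(), like kfree(), is a no-op for a NULL client. Minimal illustration:

	static void example_encoder_destroy(struct tda998x_priv *priv)
	{
		/* no NULL check needed: i2c_unregister_device(NULL) does nothing */
		i2c_unregister_device(priv->cec);
	}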
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 437e182..4e39ab3 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -69,15 +69,3 @@
 	  option changes the default for that module option.
 
 	  If in doubt, say "N".
-
-config DRM_I915_UMS
-	bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
-	depends on DRM_I915 && BROKEN
-	default n
-	help
-	  Choose this option if you still need userspace modesetting.
-
-	  Userspace modesetting is deprecated for quite some time now, so
-	  enable this only if you have ancient versions of the DDX drivers.
-
-	  If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cad1683..91bd167 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@
 	  intel_crt.o \
 	  intel_ddi.o \
 	  intel_dp.o \
+	  intel_dp_mst.o \
 	  intel_dsi_cmd.o \
 	  intel_dsi.o \
 	  intel_dsi_pll.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9d79543..dea99d9 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -426,6 +426,9 @@
 	GEN7_SO_WRITE_OFFSET(1),
 	GEN7_SO_WRITE_OFFSET(2),
 	GEN7_SO_WRITE_OFFSET(3),
+	GEN7_L3SQCREG1,
+	GEN7_L3CNTLREG2,
+	GEN7_L3CNTLREG3,
 };
 
 static const u32 gen7_blt_regs[] = {
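The three GEN7_L3* entries extend the render ring's register whitelist: the command parser only accepts register writes from batch buffers when the target register appears in these per-engine tables. A simplified sketch of the membership test (the real parser may search differently; names are illustrative):

	static bool example_reg_allowed(const u32 *table, int count, u32 reg_addr)
	{
		int i;

		for (i = 0; i < count; i++)
			if (table[i] == reg_addr)
				return true;
		return false;	/* reject writes to unlisted registers */
	}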
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b8c6892..9e737b7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -170,11 +170,13 @@
 	}
 	if (obj->ring != NULL)
 		seq_printf(m, " (%s)", obj->ring->name);
+	if (obj->frontbuffer_bits)
+		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
 
 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 {
-	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 	seq_putc(m, ' ');
 }
@@ -515,6 +517,11 @@
 	struct drm_device *dev = node->minor->dev;
 	unsigned long flags;
 	struct intel_crtc *crtc;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	for_each_intel_crtc(dev, crtc) {
 		const char pipe = pipe_name(crtc->pipe);
@@ -556,6 +563,8 @@
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -985,29 +994,6 @@
 			i915_next_seqno_get, i915_next_seqno_set,
 			"0x%llx\n");
 
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 crstanddelay;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	crstanddelay = I915_READ16(CRSTANDVID);
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
-	return 0;
-}
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1029,7 +1015,8 @@
 			   MEMSTAT_VID_SHIFT);
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
+		   IS_BROADWELL(dev)) {
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1048,7 +1035,7 @@
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
 		reqf &= ~GEN6_TURBO_DISABLE;
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			reqf >>= 24;
 		else
 			reqf >>= 25;
@@ -1065,7 +1052,7 @@
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
 		else
 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1121,20 +1108,21 @@
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
 			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
 	} else if (IS_VALLEYVIEW(dev)) {
-		u32 freq_sts, val;
+		u32 freq_sts;
 
 		mutex_lock(&dev_priv->rps.hw_lock);
 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
-		val = valleyview_rps_max_freq(dev_priv);
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv, val));
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
 
-		val = valleyview_rps_min_freq(dev_priv);
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv, val));
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+
+		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
 			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
@@ -1148,61 +1136,6 @@
 	return ret;
 }
 
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 delayfreq;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 0; i < 16; i++) {
-		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
-			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
-	return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 inttoext;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 1; i <= 32; i++) {
-		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
-		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = m->private;
@@ -1513,10 +1446,17 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
-		seq_puts(m, "enabled\n");
-	else
-		seq_puts(m, "disabled\n");
+	seq_printf(m, "Enabled by kernel parameter: %s\n",
+		   yesno(i915.enable_ips));
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		seq_puts(m, "Currently: unknown\n");
+	} else {
+		if (I915_READ(IPS_CTL) & IPS_ENABLE)
+			seq_puts(m, "Currently: enabled\n");
+		else
+			seq_puts(m, "Currently: disabled\n");
+	}
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -1620,26 +1560,6 @@
 	return ret;
 }
 
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int i915_opregion(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1677,9 +1597,6 @@
 
 #ifdef CONFIG_DRM_I915_FBDEV
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
-	if (ret)
-		return ret;
 
 	ifbdev = dev_priv->fbdev;
 	fb = to_intel_framebuffer(ifbdev->helper.fb);
@@ -1692,7 +1609,6 @@
 		   atomic_read(&fb->base.refcount.refcount));
 	describe_obj(m, fb->obj);
 	seq_putc(m, '\n');
-	mutex_unlock(&dev->mode_config.mutex);
 #endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
@@ -1723,7 +1639,7 @@
 	struct intel_context *ctx;
 	int ret, i;
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
@@ -1740,7 +1656,7 @@
 	}
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		if (ctx->obj == NULL)
+		if (ctx->legacy_hw_ctx.rcs_state == NULL)
 			continue;
 
 		seq_puts(m, "HW context ");
@@ -1749,11 +1665,11 @@
 			if (ring->default_context == ctx)
 				seq_printf(m, "(default context %s) ", ring->name);
 
-		describe_obj(m, ctx->obj);
+		describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
 		seq_putc(m, '\n');
 	}
 
-	mutex_unlock(&dev->mode_config.mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -1863,7 +1779,7 @@
 	if (i915_gem_context_is_default(ctx))
 		seq_puts(m, "  default context:\n");
 	else
-		seq_printf(m, "  context %d:\n", ctx->id);
+		seq_printf(m, "  context %d:\n", ctx->user_handle);
 	ppgtt->debug_dump(ppgtt, m);
 
 	return 0;
@@ -1976,17 +1892,25 @@
 
 	intel_runtime_pm_get(dev_priv);
 
+	mutex_lock(&dev_priv->psr.lock);
 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
+	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
+		   dev_priv->psr.busy_frontbuffer_bits);
+	seq_printf(m, "Re-enable work scheduled: %s\n",
+		   yesno(work_busy(&dev_priv->psr.work.work)));
 
 	enabled = HAS_PSR(dev) &&
 		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-	seq_printf(m, "Enabled: %s\n", yesno(enabled));
+	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
 
 	if (HAS_PSR(dev))
 		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
 			EDP_PSR_PERF_CNT_MASK;
 	seq_printf(m, "Performance_Counter: %u\n", psrperf);
+	mutex_unlock(&dev_priv->psr.lock);
 
 	intel_runtime_pm_put(dev_priv);
 	return 0;
@@ -2072,7 +1996,7 @@
 
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
-		   yesno(dev_priv->pm.irqs_disabled));
+		   yesno(!intel_irqs_enabled(dev_priv)));
 
 	return 0;
 }
@@ -2126,6 +2050,8 @@
 		return "VGA";
 	case POWER_DOMAIN_AUDIO:
 		return "AUDIO";
+	case POWER_DOMAIN_PLLS:
+		return "PLLS";
 	case POWER_DOMAIN_INIT:
 		return "INIT";
 	default:
@@ -2223,9 +2149,12 @@
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder;
 
-	seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
-		   crtc->primary->fb->base.id, crtc->x, crtc->y,
-		   crtc->primary->fb->width, crtc->primary->fb->height);
+	if (crtc->primary->fb)
+		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+			   crtc->primary->fb->base.id, crtc->x, crtc->y,
+			   crtc->primary->fb->width, crtc->primary->fb->height);
+	else
+		seq_puts(m, "\tprimary plane disabled\n");
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
 		intel_encoder_info(m, intel_crtc, intel_encoder);
 }
@@ -2287,13 +2216,15 @@
 		seq_printf(m, "\tCEA rev: %d\n",
 			   connector->display_info.cea_rev);
 	}
-	if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-	    intel_encoder->type == INTEL_OUTPUT_EDP)
-		intel_dp_info(m, intel_connector);
-	else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
-		intel_hdmi_info(m, intel_connector);
-	else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
-		intel_lvds_info(m, intel_connector);
+	if (intel_encoder) {
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_encoder->type == INTEL_OUTPUT_EDP)
+			intel_dp_info(m, intel_connector);
+		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+			intel_hdmi_info(m, intel_connector);
+		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+			intel_lvds_info(m, intel_connector);
+	}
 
 	seq_printf(m, "\tmodes:\n");
 	list_for_each_entry(mode, &connector->modes, head)
@@ -2347,17 +2278,17 @@
 		bool active;
 		int x, y;
 
-		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(crtc->active));
+			   yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
 		if (crtc->active) {
 			intel_crtc_info(m, crtc);
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
-			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
 				   yesno(crtc->cursor_base),
-				   x, y, crtc->cursor_addr,
-				   yesno(active));
+				   x, y, crtc->cursor_width, crtc->cursor_height,
+				   crtc->cursor_addr, yesno(active));
 		}
 
 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2377,12 +2308,132 @@
 	return 0;
 }
 
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	int i, j, ret;
+
+	if (!i915_semaphore_is_enabled(dev)) {
+		seq_puts(m, "Semaphores are disabled\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	if (IS_BROADWELL(dev)) {
+		struct page *page;
+		uint64_t *seqno;
+
+		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+		seqno = (uint64_t *)kmap_atomic(page);
+		for_each_ring(ring, dev_priv, i) {
+			uint64_t offset;
+
+			seq_printf(m, "%s\n", ring->name);
+
+			seq_puts(m, "  Last signal:");
+			for (j = 0; j < num_rings; j++) {
+				offset = i * I915_NUM_RINGS + j;
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+			seq_puts(m, "  Last wait:  ");
+			for (j = 0; j < num_rings; j++) {
+				offset = i + (j * I915_NUM_RINGS);
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+		}
+		kunmap_atomic(seqno);
+	} else {
+		seq_puts(m, "  Last signal:");
+		for_each_ring(ring, dev_priv, i)
+			for (j = 0; j < num_rings; j++)
+				seq_printf(m, "0x%08x\n",
+					   I915_READ(ring->semaphore.mbox.signal[j]));
+		seq_putc(m, '\n');
+	}
+
+	seq_puts(m, "\nSync seqno:\n");
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < num_rings; j++) {
+			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
+		}
+		seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+		seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+			   pll->active, yesno(pll->on));
+		seq_printf(m, " tracked hardware state:\n");
+		seq_printf(m, " dpll:    0x%08x\n", pll->hw_state.dpll);
+		seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+		seq_printf(m, " fp0:     0x%08x\n", pll->hw_state.fp0);
+		seq_printf(m, " fp1:     0x%08x\n", pll->hw_state.fp1);
+		seq_printf(m, " wrpll:   0x%08x\n", pll->hw_state.wrpll);
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
 	enum pipe pipe;
 };
 
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_encoder *encoder;
+	struct intel_encoder *intel_encoder;
+	struct intel_digital_port *intel_dig_port;
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		intel_encoder = to_intel_encoder(encoder);
+		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+			continue;
+		intel_dig_port = enc_to_dig_port(encoder);
+		if (!intel_dig_port->dp.can_mst)
+			continue;
+
+		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+	}
+	drm_modeset_unlock_all(dev);
+	return 0;
+}
+
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
 	struct pipe_crc_info *info = inode->i_private;
@@ -2849,7 +2900,60 @@
 	return 0;
 }
 
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+	    !crtc->config.pch_pfit.enabled) {
+		crtc->config.pch_pfit.force_thru = true;
+
+		intel_display_power_get(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.pch_pfit.force_thru) {
+		crtc->config.pch_pfit.force_thru = false;
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+
+		intel_display_power_put(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
 				uint32_t *val)
 {
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2863,6 +2967,9 @@
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
+		if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev);
+
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2895,11 +3002,11 @@
 	else if (INTEL_INFO(dev)->gen < 5)
 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_VALLEYVIEW(dev))
-		ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_GEN5(dev) || IS_GEN6(dev))
 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
 	else
-		ret = ivb_pipe_crc_ctl_reg(&source, &val);
+		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 
 	if (ret != 0)
 		return ret;
@@ -2929,11 +3036,16 @@
 	/* real source -> none transition */
 	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
 		struct intel_pipe_crc_entry *entries;
+		struct intel_crtc *crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
 		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
 				 pipe_name(pipe));
 
-		intel_wait_for_vblank(dev, pipe);
+		drm_modeset_lock(&crtc->base.mutex, NULL);
+		if (crtc->active)
+			intel_wait_for_vblank(dev, pipe);
+		drm_modeset_unlock(&crtc->base.mutex);
 
 		spin_lock_irq(&pipe_crc->lock);
 		entries = pipe_crc->entries;
@@ -2946,6 +3058,8 @@
 			g4x_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
 	}
 
 	return 0;
@@ -3177,7 +3291,7 @@
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, pri_wm_latency_show, dev);
@@ -3187,7 +3301,7 @@
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, spr_wm_latency_show, dev);
@@ -3197,7 +3311,7 @@
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, cur_wm_latency_show, dev);
@@ -3506,7 +3620,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3532,7 +3646,7 @@
 	u32 rp_state_cap, hw_max, hw_min;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3549,8 +3663,8 @@
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
 
-		hw_max = valleyview_rps_max_freq(dev_priv);
-		hw_min = valleyview_rps_min_freq(dev_priv);
+		hw_max = dev_priv->rps.max_freq;
+		hw_min = dev_priv->rps.min_freq;
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
@@ -3587,7 +3701,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3613,7 +3727,7 @@
 	u32 rp_state_cap, hw_max, hw_min;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3630,8 +3744,8 @@
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
 
-		hw_max = valleyview_rps_max_freq(dev_priv);
-		hw_min = valleyview_rps_min_freq(dev_priv);
+		hw_max = dev_priv->rps.max_freq;
+		hw_min = dev_priv->rps.min_freq;
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
@@ -3799,14 +3913,10 @@
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
-	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
-	{"i915_delayfreq_table", i915_delayfreq_table, 0},
-	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
-	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -3823,6 +3933,9 @@
 	{"i915_pc8_status", i915_pc8_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_display_info", i915_display_info, 0},
+	{"i915_semaphore_status", i915_semaphore_status, 0},
+	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+	{"i915_dp_mst_info", i915_dp_mst_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d443441..2e7f03a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -138,7 +138,7 @@
 	I915_WRITE(HWS_PGA, 0x1ffff000);
 }
 
-void i915_kernel_lost_context(struct drm_device * dev)
+void i915_kernel_lost_context(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
@@ -166,7 +166,7 @@
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
-static int i915_dma_cleanup(struct drm_device * dev)
+static int i915_dma_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
@@ -190,7 +190,7 @@
 	return 0;
 }
 
-static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -235,7 +235,7 @@
 	return 0;
 }
 
-static int i915_dma_resume(struct drm_device * dev)
+static int i915_dma_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = LP_RING(dev_priv);
@@ -359,7 +359,7 @@
 	return 0;
 }
 
-static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
@@ -369,6 +369,7 @@
 
 	for (i = 0; i < dwords;) {
 		int sz = validate_cmd(buffer[i]);
+
 		if (sz == 0 || i + sz > dwords)
 			return -EINVAL;
 		i += sz;
@@ -453,7 +454,7 @@
 	}
 }
 
-static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device *dev,
 				   drm_i915_cmdbuffer_t *cmd,
 				   struct drm_clip_rect *cliprects,
 				   void *cmdbuf)
@@ -487,8 +488,8 @@
 	return 0;
 }
 
-static int i915_dispatch_batchbuffer(struct drm_device * dev,
-				     drm_i915_batchbuffer_t * batch,
+static int i915_dispatch_batchbuffer(struct drm_device *dev,
+				     drm_i915_batchbuffer_t *batch,
 				     struct drm_clip_rect *cliprects)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -549,7 +550,7 @@
 	return 0;
 }
 
-static int i915_dispatch_flip(struct drm_device * dev)
+static int i915_dispatch_flip(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
@@ -755,7 +756,7 @@
 	return ret;
 }
 
-static int i915_emit_irq(struct drm_device * dev)
+static int i915_emit_irq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -781,7 +782,7 @@
 	return dev_priv->dri1.counter;
 }
 
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device *dev, int irq_nr)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -1266,6 +1267,7 @@
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
 	if (state == VGA_SWITCHEROO_ON) {
 		pr_info("switched on\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1338,6 +1340,8 @@
 	if (ret)
 		goto cleanup_gem_stolen;
 
+	dev_priv->pm._irqs_disabled = false;
+
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
@@ -1375,9 +1379,6 @@
 	 */
 	intel_fbdev_initial_config(dev);
 
-	/* Only enable hotplug handling once the fbdev is fully set up. */
-	dev_priv->enable_hotplug_processing = true;
-
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
@@ -1425,15 +1426,16 @@
 }
 
 #if IS_ENABLED(CONFIG_FB)
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
 	struct pci_dev *pdev = dev_priv->dev->pdev;
 	bool primary;
+	int ret;
 
 	ap = alloc_apertures(1);
 	if (!ap)
-		return;
+		return -ENOMEM;
 
 	ap->ranges[0].base = dev_priv->gtt.mappable_base;
 	ap->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -1441,13 +1443,16 @@
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
-	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
 
 	kfree(ap);
+
+	return ret;
 }
 #else
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
+	return 0;
 }
 #endif
 
@@ -1492,10 +1497,11 @@
 #define SEP_EMPTY
 #define PRINT_FLAG(name) info->name ? #name "," : ""
 #define SEP_COMMA ,
-	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
 			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
 			 info->gen,
 			 dev_priv->dev->pdev->device,
+			 dev_priv->dev->pdev->revision,
 			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
 #undef PRINT_S
 #undef SEP_EMPTY
@@ -1594,7 +1600,7 @@
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	dev->dev_private = (void *)dev_priv;
+	dev->dev_private = dev_priv;
 	dev_priv->dev = dev;
 
 	/* copy initial configuration to dev_priv->info */
@@ -1606,6 +1612,7 @@
 	spin_lock_init(&dev_priv->backlight_lock);
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
+	spin_lock_init(&dev_priv->mmio_flip_lock);
 	mutex_init(&dev_priv->dpio_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
@@ -1664,7 +1671,11 @@
 			goto out_gtt;
 		}
 
-		i915_kick_out_firmware_fb(dev_priv);
+		ret = i915_kick_out_firmware_fb(dev_priv);
+		if (ret) {
+			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+			goto out_gtt;
+		}
 	}
 
 	pci_set_master(dev->pdev);
@@ -1717,6 +1728,13 @@
 		goto out_mtrrfree;
 	}
 
+	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->dp_wq == NULL) {
+		DRM_ERROR("Failed to create our dp workqueue.\n");
+		ret = -ENOMEM;
+		goto out_freewq;
+	}
+
 	intel_irq_init(dev);
 	intel_uncore_sanitize(dev);
 
@@ -1792,6 +1810,8 @@
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 	pm_qos_remove_request(&dev_priv->pm_qos);
+	destroy_workqueue(dev_priv->dp_wq);
+out_freewq:
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
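
The out_freewq unwind added above follows the kernel's usual reverse-order
cleanup idiom: each acquisition gets its own label, and a failure jumps to
the label that releases only what has already succeeded. A minimal hedged
sketch of the pattern, with hypothetical setup_a()/setup_b() helpers standing
in for the two workqueue allocations:

struct example_dev;

static int setup_a(struct example_dev *ed);	/* e.g. alloc dev_priv->wq */
static int setup_b(struct example_dev *ed);	/* e.g. alloc dev_priv->dp_wq */
static void teardown_a(struct example_dev *ed);

static int example_load(struct example_dev *ed)
{
	int ret;

	ret = setup_a(ed);
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = setup_b(ed);
	if (ret)
		goto out_teardown_a;	/* undo step A only */

	return 0;

out_teardown_a:
	teardown_a(ed);
	return ret;
}
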
@@ -1892,6 +1912,7 @@
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
+	destroy_workqueue(dev_priv->dp_wq);
 	destroy_workqueue(dev_priv->wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
 
@@ -1933,7 +1954,7 @@
 * and DMA structures, since the kernel won't be using them, and clean
  * up any GEM state.
  */
-void i915_driver_lastclose(struct drm_device * dev)
+void i915_driver_lastclose(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1954,11 +1975,11 @@
 	i915_dma_cleanup(dev);
 }
 
-void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
 {
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_context_close(dev, file_priv);
-	i915_gem_release(dev, file_priv);
+	i915_gem_context_close(dev, file);
+	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -2031,7 +2052,7 @@
 * manage the gtt, we need to claim that all intel devices are agp, since
 * otherwise the drm core refuses to initialize the agp support code.
  */
-int i915_driver_device_is_agp(struct drm_device * dev)
+int i915_driver_device_is_agp(struct drm_device *dev)
 {
 	return 1;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 651e65e..6c4b25c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -28,6 +28,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/acpi.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -46,8 +47,6 @@
 			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
 	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
 			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
-	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
 	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
 
 #define GEN_CHV_PIPEOFFSETS \
@@ -55,10 +54,6 @@
 			  CHV_PIPE_C_OFFSET }, \
 	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
 			   CHV_TRANSCODER_C_OFFSET, }, \
-	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
-			  CHV_DPLL_C_OFFSET }, \
-	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
-			     CHV_DPLL_C_MD_OFFSET }, \
 	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
 			     CHV_PALETTE_C_OFFSET }
 
@@ -308,6 +303,7 @@
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -319,6 +315,7 @@
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -330,6 +327,7 @@
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -341,6 +339,7 @@
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -482,10 +481,6 @@
 	if (i915.semaphores >= 0)
 		return i915.semaphores;
 
-	/* Until we get further testing... */
-	if (IS_GEN8(dev))
-		return false;
-
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -499,8 +494,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
-
-	intel_runtime_pm_get(dev_priv);
+	pci_power_t opregion_target_state;
 
 	/* ignore lid events during suspend */
 	mutex_lock(&dev_priv->modeset_restore_lock);
@@ -526,21 +520,23 @@
 			return error;
 		}
 
-		drm_irq_uninstall(dev);
-		dev_priv->enable_hotplug_processing = false;
-
-		intel_disable_gt_powersave(dev);
-
 		/*
 		 * Disable CRTCs directly since we want to preserve sw state
-		 * for _thaw.
+		 * for _thaw. Also, power gate the CRTC power wells.
 		 */
 		drm_modeset_lock_all(dev);
-		for_each_crtc(dev, crtc) {
-			dev_priv->display.crtc_disable(crtc);
-		}
+		for_each_crtc(dev, crtc)
+			intel_crtc_control(crtc, false);
 		drm_modeset_unlock_all(dev);
 
+		intel_dp_mst_suspend(dev);
+
+		flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+		intel_runtime_pm_disable_interrupts(dev);
+
+		intel_suspend_gt_powersave(dev);
+
 		intel_modeset_suspend_hw(dev);
 	}
 
@@ -548,8 +544,15 @@
 
 	i915_save_state(dev);
 
+	opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+	if (acpi_target_system_state() < ACPI_STATE_S3)
+		opregion_target_state = PCI_D1;
+#endif
+	intel_opregion_notify_adapter(dev, opregion_target_state);
+
+	intel_uncore_forcewake_reset(dev, false);
 	intel_opregion_fini(dev);
-	intel_uncore_fini(dev);
 
 	console_lock();
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
@@ -557,6 +560,8 @@
 
 	dev_priv->suspend_count++;
 
+	intel_display_set_init_power(dev_priv, false);
+
 	return 0;
 }
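
The freeze hunk above derives the power state reported to the ACPI opregion
from the target sleep state: suspend-to-idle keeps the platform below S3, so
the adapter is told D1 rather than D3cold. A hedged standalone sketch of that
decision (example_opregion_target() is illustrative, not driver API):

#include <linux/acpi.h>
#include <linux/pci.h>

static pci_power_t example_opregion_target(void)
{
	/* Full S3/S4 suspend: device power may be removed entirely. */
	pci_power_t target = PCI_D3cold;

#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	/* Suspend-to-idle targets a state below S3; report D1 instead. */
	if (acpi_target_system_state() < ACPI_STATE_S3)
		target = PCI_D1;
#endif
	return target;
}
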
 
@@ -606,7 +611,10 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	intel_uncore_early_sanitize(dev);
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_disable_pc8(dev_priv);
+
+	intel_uncore_early_sanitize(dev, true);
 	intel_uncore_sanitize(dev);
 	intel_power_domains_init_hw(dev_priv);
 
@@ -639,11 +647,19 @@
 		}
 		mutex_unlock(&dev->struct_mutex);
 
-		/* We need working interrupts for modeset enabling ... */
-		drm_irq_install(dev, dev->pdev->irq);
+		intel_runtime_pm_restore_interrupts(dev);
 
 		intel_modeset_init_hw(dev);
 
+		{
+			unsigned long irqflags;
+			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+			if (dev_priv->display.hpd_irq_setup)
+				dev_priv->display.hpd_irq_setup(dev);
+			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		}
+
+		intel_dp_mst_resume(dev);
 		drm_modeset_lock_all(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
@@ -655,7 +671,6 @@
 		 * notifications.
 		 * */
 		intel_hpd_init(dev);
-		dev_priv->enable_hotplug_processing = true;
 		/* Config may have changed between suspend and resume */
 		drm_helper_hpd_irq_event(dev);
 	}
@@ -678,7 +693,8 @@
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_opregion_notify_adapter(dev, PCI_D0);
+
 	return 0;
 }
 
@@ -887,6 +903,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
 
 	/*
 	 * We have a suspend ordering issue with the snd-hda driver also
@@ -900,6 +917,9 @@
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
+	if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
+		hsw_enable_pc8(dev_priv);
+
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 374f964..5de27f9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20080730"
+#define DRIVER_DATE		"20140620"
 
 enum pipe {
 	INVALID_PIPE = -1,
@@ -129,6 +129,7 @@
 	POWER_DOMAIN_PORT_OTHER,
 	POWER_DOMAIN_VGA,
 	POWER_DOMAIN_AUDIO,
+	POWER_DOMAIN_PLLS,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -178,14 +179,20 @@
 	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
 		if ((intel_connector)->base.encoder == (__encoder))
 
+#define for_each_power_domain(domain, mask)				\
+	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
+		if ((1 << (domain)) & (mask))
+
 struct drm_i915_private;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
 	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 	/* real shared dpll ids must be >= 0 */
-	DPLL_ID_PCH_PLL_A,
-	DPLL_ID_PCH_PLL_B,
+	DPLL_ID_PCH_PLL_A = 0,
+	DPLL_ID_PCH_PLL_B = 1,
+	DPLL_ID_WRPLL1 = 0,
+	DPLL_ID_WRPLL2 = 1,
 };
 #define I915_NUM_PLLS 2
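
A hedged usage sketch for the for_each_power_domain() iterator added above:
it visits each domain whose bit is set in the mask (the mask below is made
up for illustration, and example_walk_domains() is not driver API):

static void example_walk_domains(u32 mask)
{
	int domain;

	/* Only domains whose bit is set in @mask are visited. */
	for_each_power_domain(domain, mask)
		pr_info("power domain %d is in the mask\n", domain);
}

/* e.g.: example_walk_domains(BIT(POWER_DOMAIN_VGA) | BIT(POWER_DOMAIN_AUDIO)); */
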
 
@@ -194,6 +201,7 @@
 	uint32_t dpll_md;
 	uint32_t fp0;
 	uint32_t fp1;
+	uint32_t wrpll;
 };
 
 struct intel_shared_dpll {
@@ -204,6 +212,8 @@
 	/* should match the index in the dev_priv->shared_dplls array */
 	enum intel_dpll_id id;
 	struct intel_dpll_hw_state hw_state;
+	/* The mode_set hook is optional and should be used together with the
+	 * intel_prepare_shared_dpll function. */
 	void (*mode_set)(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll);
 	void (*enable)(struct drm_i915_private *dev_priv,
@@ -228,12 +238,6 @@
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n);
 
-struct intel_ddi_plls {
-	int spll_refcount;
-	int wrpll1_refcount;
-	int wrpll2_refcount;
-};
-
 /* Interface history:
  *
  * 1.1: Original.
@@ -324,6 +328,7 @@
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
+	struct drm_i915_error_object *semaphore_obj;
 
 	struct drm_i915_error_ring {
 		bool valid;
@@ -435,8 +440,8 @@
 	void (*update_wm)(struct drm_crtc *crtc);
 	void (*update_sprite_wm)(struct drm_plane *plane,
 				 struct drm_crtc *crtc,
-				 uint32_t sprite_width, int pixel_size,
-				 bool enable, bool scaled);
+				 uint32_t sprite_width, uint32_t sprite_height,
+				 int pixel_size, bool enable, bool scaled);
 	void (*modeset_global_resources)(struct drm_device *dev);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
@@ -552,8 +557,6 @@
 	/* Register offsets for the various display pipes and transcoders */
 	int pipe_offsets[I915_MAX_TRANSCODERS];
 	int trans_offsets[I915_MAX_TRANSCODERS];
-	int dpll_offsets[I915_MAX_PIPES];
-	int dpll_md_offsets[I915_MAX_PIPES];
 	int palette_offsets[I915_MAX_PIPES];
 	int cursor_offsets[I915_MAX_PIPES];
 };
@@ -586,28 +589,48 @@
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ *	       context).
+ * @hang_stats: information about the role of this context in possible GPU
+ *		hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ *                initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of its
+ * internal state.
+ */
 struct intel_context {
 	struct kref ref;
-	int id;
-	bool is_initialized;
+	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct intel_engine_cs *last_ring;
-	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_address_space *vm;
 
+	struct {
+		struct drm_i915_gem_object *rcs_state;
+		bool initialized;
+	} legacy_hw_ctx;
+
 	struct list_head link;
 };
 
 struct i915_fbc {
 	unsigned long size;
+	unsigned threshold;
 	unsigned int fb_id;
 	enum plane plane;
 	int y;
 
-	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node compressed_fb;
 	struct drm_mm_node *compressed_llb;
 
 	struct intel_fbc_work {
@@ -635,9 +658,15 @@
 	struct intel_connector *connector;
 };
 
+struct intel_dp;
 struct i915_psr {
+	struct mutex lock;
 	bool sink_support;
 	bool source_ok;
+	struct intel_dp *enabled;
+	bool active;
+	struct delayed_work work;
+	unsigned busy_frontbuffer_bits;
 };
 
 enum intel_pch {
@@ -880,6 +909,12 @@
 	u32 clock_gate_dis2;
 };
 
+struct intel_rps_ei {
+	u32 cz_clock;
+	u32 render_c0;
+	u32 media_c0;
+};
+
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -903,6 +938,9 @@
 	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
 	u8 rp1_freq;		/* "less than" RP0 power/freqency */
 	u8 rp0_freq;		/* Non-overclocked max frequency. */
+	u32 cz_freq;
+
+	u32 ei_interrupt_count;
 
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -910,6 +948,9 @@
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
+	/* manual wa residency calculations */
+	struct intel_rps_ei up_ei, down_ei;
+
 	/*
 	 * Protects RPS/RC6 register access and PCU communication.
 	 * Must be taken after struct_mutex if nested.
@@ -1230,6 +1271,7 @@
 		u16 pwm_freq_hz;
 		bool present;
 		bool active_low_pwm;
+		u8 min_brightness;	/* min_brightness/255 of max */
 	} backlight;
 
 	/* MIPI DSI */
@@ -1299,7 +1341,7 @@
  */
 struct i915_runtime_pm {
 	bool suspended;
-	bool irqs_disabled;
+	bool _irqs_disabled;
 };
 
 enum intel_pipe_crc_source {
@@ -1332,6 +1374,17 @@
 	wait_queue_head_t wq;
 };
 
+struct i915_frontbuffer_tracking {
+	struct mutex lock;
+
+	/*
+	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+	 * scheduled flips.
+	 */
+	unsigned busy_bits;
+	unsigned flip_bits;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -1363,6 +1416,7 @@
 
 	struct pci_dev *bridge_dev;
 	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct drm_i915_gem_object *semaphore_obj;
 	uint32_t last_seqno, next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
@@ -1371,6 +1425,9 @@
 	/* protects the irq masks */
 	spinlock_t irq_lock;
 
+	/* protects the mmio flip data */
+	spinlock_t mmio_flip_lock;
+
 	bool display_irqs_enabled;
 
 	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1390,7 +1447,6 @@
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
 	struct work_struct hotplug_work;
-	bool enable_hotplug_processing;
 	struct {
 		unsigned long hpd_last_jiffies;
 		int hpd_cnt;
@@ -1467,7 +1523,6 @@
 
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
-	struct intel_ddi_plls ddi_plls;
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	/* Reclocking support */
@@ -1475,6 +1530,9 @@
 	bool lvds_downclock_avail;
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
+
+	struct i915_frontbuffer_tracking fb_tracking;
+
 	u16 orig_clock;
 
 	bool mchbar_need_disable;
@@ -1541,6 +1599,20 @@
 
 	struct i915_runtime_pm pm;
 
+	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+	u32 long_hpd_port_mask;
+	u32 short_hpd_port_mask;
+	struct work_struct dig_port_work;
+
+	/*
+	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
+	 * non-DP HPD work could block the workqueue while acquiring a
+	 * mode config mutex that userspace may already hold, while
+	 * userspace in turn waits on the DP work to run - but that work
+	 * is queued behind the non-DP one.
+	 */
+	struct workqueue_struct *dp_wq;
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
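
The comment above describes the deadlock that the dedicated dp_wq avoids. A
hedged sketch of the pattern in isolation - give long-running DP work its own
ordered workqueue so it can never be queued behind unrelated work that sleeps
on a lock userspace holds (all example_* names are hypothetical):

#include <linux/workqueue.h>

struct example_state {
	struct workqueue_struct *dp_wq;
	struct work_struct dp_work;
};

static void example_dp_work_fn(struct work_struct *work)
{
	/* Long-running DP work (e.g. MST topology probing) goes here. */
}

static int example_init(struct example_state *st)
{
	st->dp_wq = alloc_ordered_workqueue("example-dp", 0);
	if (!st->dp_wq)
		return -ENOMEM;
	INIT_WORK(&st->dp_work, example_dp_work_fn);
	return 0;
}

static void example_hpd(struct example_state *st)
{
	/* Runs independently of the driver's general workqueue. */
	queue_work(st->dp_wq, &st->dp_work);
}
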
@@ -1592,6 +1664,28 @@
 	void (*release)(struct drm_i915_gem_object *);
 };
 
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_FRONTBUFFER_BITS \
+	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe) \
+	(1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+	(1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
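
A worked example of the bit layout these macros produce, using pipe B
(index 1), which owns bits 4..7 of obj->frontbuffer_bits:

	INTEL_FRONTBUFFER_PRIMARY(1)  == 1 << 4   == 0x10
	INTEL_FRONTBUFFER_CURSOR(1)   == 1 << 5   == 0x20
	INTEL_FRONTBUFFER_SPRITE(1)   == 1 << 6   == 0x40
	INTEL_FRONTBUFFER_OVERLAY(1)  == 1 << 7   == 0x80
	INTEL_FRONTBUFFER_ALL_MASK(1) == 0xf << 4 == 0xf0
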
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
@@ -1662,6 +1756,12 @@
 	unsigned int pin_display:1;
 
 	/*
+	 * Is the object to be mapped as read-only to the GPU
+	 * Only honoured if hardware has relevant pte bit
+	 */
+	unsigned long gt_ro:1;
+
+	/*
 	 * Is the GPU currently using a fence to access this buffer,
 	 */
 	unsigned int pending_fenced_gpu_access:1;
@@ -1673,6 +1773,8 @@
 	unsigned int has_global_gtt_mapping:1;
 	unsigned int has_dma_mapping:1;
 
+	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
 	struct sg_table *pages;
 	int pages_pin_count;
 
@@ -1719,6 +1821,10 @@
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+		       struct drm_i915_gem_object *new,
+		       unsigned frontbuffer_bits);
+
 /**
  * Request queue structure.
  *
@@ -1940,10 +2046,8 @@
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && \
-				 (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
-#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 \
-				 && !IS_GEN8(dev))
+#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6)
+#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
 #define USES_PPGTT(dev)		intel_enable_ppgtt(dev, false)
 #define USES_FULL_PPGTT(dev)	intel_enable_ppgtt(dev, true)
 
@@ -1998,6 +2102,8 @@
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+
 /* DPF == dynamic parity feature */
 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@ -2040,6 +2146,8 @@
 	bool reset;
 	bool disable_display;
 	bool disable_vtd_wa;
+	int use_mmio_flip;
+	bool mmio_debug;
 };
 extern struct i915_params i915 __read_mostly;
 
@@ -2048,12 +2156,12 @@
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
 extern void i915_driver_lastclose(struct drm_device * dev);
 extern void i915_driver_preclose(struct drm_device *dev,
-				 struct drm_file *file_priv);
+				 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
-				  struct drm_file *file_priv);
+				  struct drm_file *file);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2084,10 +2192,12 @@
 extern void intel_hpd_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev,
+					bool restore_forcewake);
 extern void intel_uncore_init(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
 extern void intel_uncore_fini(struct drm_device *dev);
+extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
 
 void
 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2235,6 +2345,8 @@
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
 	return unlikely(atomic_read(&error->reset_counter)
@@ -2404,7 +2516,7 @@
 
 static inline bool i915_gem_context_is_default(const struct intel_context *c)
 {
-	return c->id == DEFAULT_CONTEXT_ID;
+	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2435,7 +2547,7 @@
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
 void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
@@ -2445,7 +2557,6 @@
 					       u32 stolen_offset,
 					       u32 gtt_offset,
 					       u32 size);
-void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -2593,8 +2704,8 @@
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
-extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
-extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
@@ -2605,6 +2716,8 @@
 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file);
 
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -2700,10 +2813,10 @@
 
 static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
 {
-	if (HAS_PCH_SPLIT(dev))
-		return CPU_VGACNTRL;
-	else if (IS_VALLEYVIEW(dev))
+	if (IS_VALLEYVIEW(dev))
 		return VLV_VGACNTRL;
+	else if (INTEL_INFO(dev)->gen >= 5)
+		return CPU_VGACNTRL;
 	else
 		return VGACNTRL;
 }
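
The reordering in i915_vgacntrl_reg() above matters because Valleyview is a
gen 7 part: with the gen >= 5 check first it would never reach the VLV
branch. A hedged sketch of the required dispatch order (example_vgacntrl()
is illustrative only):

static u32 example_vgacntrl(int gen, bool is_valleyview)
{
	if (is_valleyview)	/* gen 7, but uses its own VGA control reg */
		return VLV_VGACNTRL;
	else if (gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}
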
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d893e4d..dcd8d7b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1095,7 +1095,7 @@
  * Compare seqno against outstanding lazy request. Emit a request if they are
  * equal.
  */
-static int
+int
 i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
 {
 	int ret;
@@ -1161,14 +1161,14 @@
 	unsigned long timeout_expire;
 	int ret;
 
-	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
+	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
 	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
-	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
 		if (file_priv)
 			mod_delayed_work(dev_priv->wq,
@@ -1561,14 +1561,29 @@
 	if (ret)
 		goto unpin;
 
-	obj->fault_mappable = true;
-
+	/* Finally, remap it using the new GTT offset */
 	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
 	pfn >>= PAGE_SHIFT;
-	pfn += page_offset;
 
-	/* Finally, remap it using the new GTT offset */
-	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	if (!obj->fault_mappable) {
+		unsigned long size = min_t(unsigned long,
+					   vma->vm_end - vma->vm_start,
+					   obj->base.size);
+		int i;
+
+		for (i = 0; i < size >> PAGE_SHIFT; i++) {
+			ret = vm_insert_pfn(vma,
+					    (unsigned long)vma->vm_start + i * PAGE_SIZE,
+					    pfn + i);
+			if (ret)
+				break;
+		}
+
+		obj->fault_mappable = true;
+	} else
+		ret = vm_insert_pfn(vma,
+				    (unsigned long)vmf->virtual_address,
+				    pfn + page_offset);
 unpin:
 	i915_gem_object_ggtt_unpin(obj);
 unlock:
@@ -2052,16 +2067,10 @@
 			 * our own buffer, now let the real VM do its job and
 			 * go down in flames if truly OOM.
 			 */
-			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
-			gfp |= __GFP_IO | __GFP_WAIT;
-
 			i915_gem_shrink_all(dev_priv);
-			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			page = shmem_read_mapping_page(mapping, i);
 			if (IS_ERR(page))
 				goto err_pages;
-
-			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
-			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
 #ifdef CONFIG_SWIOTLB
 		if (swiotlb_nr_tbl()) {
@@ -2210,6 +2219,8 @@
 			list_move_tail(&vma->mm_list, &vm->inactive_list);
 	}
 
+	intel_fb_obj_flush(obj, true);
+
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
@@ -2319,7 +2330,7 @@
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request_start = intel_ring_get_tail(ring);
+	request_start = intel_ring_get_tail(ring->buffer);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2340,7 +2351,7 @@
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request_ring_position = intel_ring_get_tail(ring);
+	request_ring_position = intel_ring_get_tail(ring->buffer);
 
 	ret = ring->add_request(ring);
 	if (ret)
@@ -2831,6 +2842,8 @@
 	idx = intel_ring_sync_index(from, to);
 
 	seqno = obj->last_read_seqno;
+	/* Optimization: Avoid semaphore sync when we are sure we already
+	 * waited for an object with higher seqno */
 	if (seqno <= from->semaphore.sync_seqno[idx])
 		return 0;
 
@@ -2914,8 +2927,6 @@
 
 	vma->unbind_vma(vma);
 
-	i915_gem_gtt_finish_object(obj);
-
 	list_del_init(&vma->mm_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	if (i915_is_ggtt(vma->vm))
@@ -2926,8 +2937,10 @@
 
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
-	if (list_empty(&obj->vma_list))
+	if (list_empty(&obj->vma_list)) {
+		i915_gem_gtt_finish_object(obj);
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+	}
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -3539,6 +3552,8 @@
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
+	intel_fb_obj_flush(obj, false);
+
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
 					    old_write_domain);
@@ -3560,6 +3575,8 @@
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
+	intel_fb_obj_flush(obj, false);
+
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
 					    old_write_domain);
@@ -3613,6 +3630,9 @@
 		obj->dirty = 1;
 	}
 
+	if (write)
+		intel_fb_obj_invalidate(obj, NULL);
+
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -3949,6 +3969,9 @@
 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
+	if (write)
+		intel_fb_obj_invalidate(obj, NULL);
+
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -4437,13 +4460,14 @@
 	if (obj->stolen)
 		i915_gem_object_unpin_pages(obj);
 
+	WARN_ON(obj->frontbuffer_bits);
+
 	if (WARN_ON(obj->pages_pin_count))
 		obj->pages_pin_count = 0;
 	if (discard_backing_storage(obj))
 		obj->madv = I915_MADV_DONTNEED;
 	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
-	i915_gem_object_release_stolen(obj);
 
 	BUG_ON(obj->pages);
 
@@ -4921,6 +4945,8 @@
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	register_oom_notifier(&dev_priv->mm.oom_notifier);
+
+	mutex_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@ -4982,6 +5008,23 @@
 	return ret;
 }
 
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+		       struct drm_i915_gem_object *new,
+		       unsigned frontbuffer_bits)
+{
+	if (old) {
+		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
+		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
+		old->frontbuffer_bits &= ~frontbuffer_bits;
+	}
+
+	if (new) {
+		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
+		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
+		new->frontbuffer_bits |= frontbuffer_bits;
+	}
+}
+
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
 	if (!mutex_is_locked(mutex))
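
A hedged usage sketch for the i915_gem_track_fb() helper added above: on a
page flip, move the pipe's primary-plane bit from the outgoing scanout object
to the incoming one. The objects and pipe index are hypothetical, and the
caller must hold struct_mutex, as the WARN_ONs enforce:

static void example_flip_tracking(struct drm_i915_gem_object *old_obj,
				  struct drm_i915_gem_object *new_obj)
{
	/* Pipe A's primary plane: bit 0 of frontbuffer_bits. */
	i915_gem_track_fb(old_obj, new_obj, INTEL_FRONTBUFFER_PRIMARY(0));
}
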
@@ -5064,12 +5107,13 @@
 	    vm == &dev_priv->mm.aliasing_ppgtt->base)
 		vm = &dev_priv->gtt.base;
 
-	BUG_ON(list_empty(&o->vma_list));
 	list_for_each_entry(vma, &o->vma_list, vma_link) {
 		if (vma->vm == vm)
 			return vma->node.start;
 
 	}
+	WARN(1, "%s vma for this object not found.\n",
+	     i915_is_ggtt(vm) ? "global" : "ppgtt");
 	return -1;
 }
 
@@ -5150,8 +5194,11 @@
 	bool was_interruptible;
 	bool unlock;
 
-	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
 		schedule_timeout_killable(1);
+		if (fatal_signal_pending(current))
+			return NOTIFY_DONE;
+	}
 	if (timeout == 0) {
 		pr_err("Unable to purge GPU memory due lock contention.\n");
 		return NOTIFY_DONE;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5ddf3b..3b99390 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -182,22 +182,50 @@
 						   typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	if (ctx->obj) {
+	if (ctx->legacy_hw_ctx.rcs_state) {
 		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->obj->base.dev))
+		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
 			ppgtt = ctx_to_ppgtt(ctx);
-
-		/* XXX: Free up the object before tearing down the address space, in
-		 * case we're bound in the PPGTT */
-		drm_gem_object_unreference(&ctx->obj->base);
 	}
 
 	if (ppgtt)
 		kref_put(&ppgtt->ref, ppgtt_release);
+	if (ctx->legacy_hw_ctx.rcs_state)
+		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	list_del(&ctx->link);
 	kfree(ctx);
 }
 
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 */
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret)) {
+			drm_gem_object_unreference(&obj->base);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return obj;
+}
+
 static struct i915_hw_ppgtt *
 create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
 {
@@ -234,40 +262,26 @@
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	if (dev_priv->hw_context_size) {
-		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-		if (ctx->obj == NULL) {
-			ret = -ENOMEM;
+		struct drm_i915_gem_object *obj =
+				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		if (IS_ERR(obj)) {
+			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-
-		/*
-		 * Try to make the context utilize L3 as well as LLC.
-		 *
-		 * On VLV we don't have L3 controls in the PTEs so we
-		 * shouldn't touch the cache level, especially as that
-		 * would make the object snooped which might have a
-		 * negative performance impact.
-		 */
-		if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
-			ret = i915_gem_object_set_cache_level(ctx->obj,
-							      I915_CACHE_L3_LLC);
-			/* Failure shouldn't ever happen this early */
-			if (WARN_ON(ret))
-				goto err_out;
-		}
+		ctx->legacy_hw_ctx.rcs_state = obj;
 	}
 
 	/* Default context will never have a file_priv */
 	if (file_priv != NULL) {
 		ret = idr_alloc(&file_priv->context_idr, ctx,
-				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
 		if (ret < 0)
 			goto err_out;
 	} else
-		ret = DEFAULT_CONTEXT_ID;
+		ret = DEFAULT_CONTEXT_HANDLE;
 
 	ctx->file_priv = file_priv;
-	ctx->id = ret;
+	ctx->user_handle = ret;
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
 	 * is no remap info, it will be a NOP. */
@@ -301,7 +315,7 @@
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx && ctx->obj) {
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
 		 * context. This can cause a problem as pinning the
@@ -309,7 +323,7 @@
 		 * be available. To avoid this we always pin the default
 		 * context.
 		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->obj,
+		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(dev), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +363,8 @@
 	return ctx;
 
 err_unpin:
-	if (is_global_default_ctx && ctx->obj)
-		i915_gem_object_ggtt_unpin(ctx->obj);
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
@@ -366,23 +380,27 @@
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 		struct intel_context *dctx = ring->default_context;
+		struct intel_context *lctx = ring->last_context;
 
 		/* Do a fake switch to the default context */
-		if (ring->last_context == dctx)
+		if (lctx == dctx)
 			continue;
 
-		if (!ring->last_context)
+		if (!lctx)
 			continue;
 
-		if (dctx->obj && i == RCS) {
-			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
 						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
-			dctx->obj->base.write_domain = 0;
-			dctx->obj->active = 0;
+			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+			dctx->legacy_hw_ctx.rcs_state->active = 0;
 		}
 
-		i915_gem_context_unreference(ring->last_context);
+		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+		i915_gem_context_unreference(lctx);
 		i915_gem_context_reference(dctx);
 		ring->last_context = dctx;
 	}
@@ -429,7 +447,7 @@
 	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
 	int i;
 
-	if (dctx->obj) {
+	if (dctx->legacy_hw_ctx.rcs_state) {
 		/* The only known way to stop the gpu from accessing the hw context is
 		 * to reset it. Do this as the very last operation to avoid confusing
 		 * other code, leading to spurious errors. */
@@ -444,13 +462,13 @@
 		WARN_ON(!dev_priv->ring[RCS].last_context);
 		if (dev_priv->ring[RCS].last_context == dctx) {
 			/* Fake switch to NULL context */
-			WARN_ON(dctx->obj->active);
-			i915_gem_object_ggtt_unpin(dctx->obj);
+			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 			i915_gem_context_unreference(dctx);
 			dev_priv->ring[RCS].last_context = NULL;
 		}
 
-		i915_gem_object_ggtt_unpin(dctx->obj);
+		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +588,7 @@
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
@@ -602,16 +620,16 @@
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
-		BUG_ON(from->obj == NULL);
-		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
 
-	if (from == to && from->last_ring == ring && !to->remap_slice)
+	if (from == to && !to->remap_slice)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->obj,
+		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
@@ -644,17 +662,17 @@
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
 	if (ret)
 		goto unpin_out;
 
-	if (!to->obj->has_global_gtt_mapping) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
 							   &dev_priv->gtt.base);
-		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
 	}
 
-	if (!to->is_initialized || i915_gem_context_is_default(to))
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
 
 	ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +698,8 @@
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -689,21 +707,20 @@
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->obj->dirty = 1;
-		BUG_ON(from->obj->ring != ring);
+		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->obj);
+		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
 		i915_gem_context_unreference(from);
 	}
 
-	uninitialized = !to->is_initialized && from == NULL;
-	to->is_initialized = true;
+	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+	to->legacy_hw_ctx.initialized = true;
 
 done:
 	i915_gem_context_reference(to);
 	ring->last_context = to;
-	to->last_ring = ring;
 
 	if (uninitialized) {
 		ret = i915_gem_render_state_init(ring);
@@ -715,7 +732,7 @@
 
 unpin_out:
 	if (ring->id == RCS)
-		i915_gem_object_ggtt_unpin(to->obj);
+		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
 
@@ -736,7 +753,7 @@
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (to->obj == NULL) { /* We have the fake context */
+	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (to != ring->last_context) {
 			i915_gem_context_reference(to);
 			if (ring->last_context)
@@ -774,7 +791,7 @@
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	args->ctx_id = ctx->id;
+	args->ctx_id = ctx->user_handle;
 	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
 	return 0;
@@ -788,7 +805,7 @@
 	struct intel_context *ctx;
 	int ret;
 
-	if (args->ctx_id == DEFAULT_CONTEXT_ID)
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
 		return -ENOENT;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -801,7 +818,7 @@
 		return PTR_ERR(ctx);
 	}
 
-	idr_remove(&ctx->file_priv->context_idr, ctx->id);
+	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a30133..60998fc 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,7 +938,7 @@
 	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;
 
-	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);
 
 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -975,10 +975,8 @@
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
-			/* check for potential scanout */
-			if (i915_gem_obj_ggtt_bound(obj) &&
-			    i915_gem_obj_to_ggtt(obj)->pin_count)
-				intel_mark_fb_busy(obj, ring);
+
+			intel_fb_obj_invalidate(obj, ring);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1028,6 +1026,163 @@
 	return 0;
 }
 
+static int
+legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+			     struct intel_engine_cs *ring,
+			     struct intel_context *ctx,
+			     struct drm_i915_gem_execbuffer2 *args,
+			     struct list_head *vmas,
+			     struct drm_i915_gem_object *batch_obj,
+			     u64 exec_start, u32 flags)
+{
+	struct drm_clip_rect *cliprects = NULL;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 exec_len;
+	int instp_mode;
+	u32 instp_mask;
+	int i, ret = 0;
+
+	if (args->num_cliprects != 0) {
+		if (ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+			return -EINVAL;
+		}
+
+		if (INTEL_INFO(dev)->gen >= 5) {
+			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+			return -EINVAL;
+		}
+
+		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+			DRM_DEBUG("execbuf with %u cliprects\n",
+				  args->num_cliprects);
+			return -EINVAL;
+		}
+
+		cliprects = kcalloc(args->num_cliprects,
+				    sizeof(*cliprects),
+				    GFP_KERNEL);
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		if (copy_from_user(cliprects,
+				   to_user_ptr(args->cliprects_ptr),
+				   sizeof(*cliprects)*args->num_cliprects)) {
+			ret = -EFAULT;
+			goto error;
+		}
+	} else {
+		if (args->DR4 == 0xffffffff) {
+			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+			args->DR4 = 0;
+		}
+
+		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+			return -EINVAL;
+		}
+	}
+
+	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	if (ret)
+		goto error;
+
+	ret = i915_switch_context(ring, ctx);
+	if (ret)
+		goto error;
+
+	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	instp_mask = I915_EXEC_CONSTANTS_MASK;
+	switch (instp_mode) {
+	case I915_EXEC_CONSTANTS_REL_GENERAL:
+	case I915_EXEC_CONSTANTS_ABSOLUTE:
+	case I915_EXEC_CONSTANTS_REL_SURFACE:
+		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+			ret = -EINVAL;
+			goto error;
+		}
+
+		if (instp_mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4) {
+				DRM_DEBUG("no rel constants on pre-gen4\n");
+				ret = -EINVAL;
+				goto error;
+			}
+
+			if (INTEL_INFO(dev)->gen > 5 &&
+			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+				ret = -EINVAL;
+				goto error;
+			}
+
+			/* The HW changed the meaning on this bit on gen6 */
+			if (INTEL_INFO(dev)->gen >= 6)
+				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+		}
+		break;
+	default:
+		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (ring == &dev_priv->ring[RCS] &&
+			instp_mode != dev_priv->relative_constants_mode) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			goto error;
+
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
+
+		dev_priv->relative_constants_mode = instp_mode;
+	}
+
+	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+		ret = i915_reset_gen7_sol_offsets(dev, ring);
+		if (ret)
+			goto error;
+	}
+
+	exec_len = args->batch_len;
+	if (cliprects) {
+		for (i = 0; i < args->num_cliprects; i++) {
+			ret = i915_emit_box(dev, &cliprects[i],
+					    args->DR1, args->DR4);
+			if (ret)
+				goto error;
+
+			ret = ring->dispatch_execbuffer(ring,
+							exec_start, exec_len,
+							flags);
+			if (ret)
+				goto error;
+		}
+	} else {
+		ret = ring->dispatch_execbuffer(ring,
+						exec_start, exec_len,
+						flags);
+		if (ret)
+			return ret;
+	}
+
+	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+	i915_gem_execbuffer_move_to_active(vmas, ring);
+	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+error:
+	kfree(cliprects);
+	return ret;
+}
+
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
  * The Ring ID is returned.
@@ -1087,14 +1242,13 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
-	struct drm_clip_rect *cliprects = NULL;
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u64 exec_start = args->batch_start_offset, exec_len;
-	u32 mask, flags;
-	int ret, mode, i;
+	u64 exec_start = args->batch_start_offset;
+	u32 flags;
+	int ret;
 	bool need_relocs;
 
 	if (!i915_gem_check_execbuffer(args))
@@ -1138,87 +1292,11 @@
 		return -EINVAL;
 	}
 
-	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-	mask = I915_EXEC_CONSTANTS_MASK;
-	switch (mode) {
-	case I915_EXEC_CONSTANTS_REL_GENERAL:
-	case I915_EXEC_CONSTANTS_ABSOLUTE:
-	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (mode != 0 && ring != &dev_priv->ring[RCS]) {
-			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-			return -EINVAL;
-		}
-
-		if (mode != dev_priv->relative_constants_mode) {
-			if (INTEL_INFO(dev)->gen < 4) {
-				DRM_DEBUG("no rel constants on pre-gen4\n");
-				return -EINVAL;
-			}
-
-			if (INTEL_INFO(dev)->gen > 5 &&
-			    mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-				return -EINVAL;
-			}
-
-			/* The HW changed the meaning on this bit on gen6 */
-			if (INTEL_INFO(dev)->gen >= 6)
-				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-		}
-		break;
-	default:
-		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
-		return -EINVAL;
-	}
-
 	if (args->buffer_count < 1) {
 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
 
-	if (args->num_cliprects != 0) {
-		if (ring != &dev_priv->ring[RCS]) {
-			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
-			return -EINVAL;
-		}
-
-		if (INTEL_INFO(dev)->gen >= 5) {
-			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
-			return -EINVAL;
-		}
-
-		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
-			DRM_DEBUG("execbuf with %u cliprects\n",
-				  args->num_cliprects);
-			return -EINVAL;
-		}
-
-		cliprects = kcalloc(args->num_cliprects,
-				    sizeof(*cliprects),
-				    GFP_KERNEL);
-		if (cliprects == NULL) {
-			ret = -ENOMEM;
-			goto pre_mutex_err;
-		}
-
-		if (copy_from_user(cliprects,
-				   to_user_ptr(args->cliprects_ptr),
-				   sizeof(*cliprects)*args->num_cliprects)) {
-			ret = -EFAULT;
-			goto pre_mutex_err;
-		}
-	} else {
-		if (args->DR4 == 0xffffffff) {
-			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
-			args->DR4 = 0;
-		}
-
-		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
-			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
-			return -EINVAL;
-		}
-	}
-
 	intel_runtime_pm_get(dev_priv);
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -1322,63 +1400,11 @@
 	else
 		exec_start += i915_gem_obj_offset(batch_obj, vm);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+	ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
+			args, &eb->vmas, batch_obj, exec_start, flags);
 	if (ret)
 		goto err;
 
-	ret = i915_switch_context(ring, ctx);
-	if (ret)
-		goto err;
-
-	if (ring == &dev_priv->ring[RCS] &&
-	    mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-				goto err;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, INSTPM);
-		intel_ring_emit(ring, mask << 16 | mode);
-		intel_ring_advance(ring);
-
-		dev_priv->relative_constants_mode = mode;
-	}
-
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, ring);
-		if (ret)
-			goto err;
-	}
-
-
-	exec_len = args->batch_len;
-	if (cliprects) {
-		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(dev, &cliprects[i],
-					    args->DR1, args->DR4);
-			if (ret)
-				goto err;
-
-			ret = ring->dispatch_execbuffer(ring,
-							exec_start, exec_len,
-							flags);
-			if (ret)
-				goto err;
-		}
-	} else {
-		ret = ring->dispatch_execbuffer(ring,
-						exec_start, exec_len,
-						flags);
-		if (ret)
-			goto err;
-	}
-
-	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
-
-	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
@@ -1387,8 +1413,6 @@
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
-	kfree(cliprects);
-
 	/* intel_gpu_busy should also get a ref, so it will free when the device
 	 * is really idle. */
 	intel_runtime_pm_put(dev_priv);
@@ -1525,7 +1549,7 @@
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		struct drm_i915_gem_exec_object2 *user_exec_list =
+		struct drm_i915_gem_exec_object2 __user *user_exec_list =
 				   to_user_ptr(args->buffers_ptr);
 		int i;
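
The __user annotation added above lets sparse catch direct dereferences of
the userspace pointer; such memory must only be touched through
copy_to_user()/copy_from_user(). A hedged sketch of the safe access pattern
(example_write_back() is illustrative only):

#include <linux/uaccess.h>

static int example_write_back(struct drm_i915_gem_exec_object2 __user *list,
			      const struct drm_i915_gem_exec_object2 *src)
{
	/* Explicit, checked copy into userspace memory. */
	if (copy_to_user(list, src, sizeof(*src)))
		return -EFAULT;
	return 0;
}
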
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8b3cde7..5188936 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -63,6 +63,13 @@
 	}
 #endif
 
+	/* Early VLV doesn't have this */
+	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+	    dev->pdev->revision < 0xb) {
+		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
+		return 0;
+	}
+
 	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }
 
@@ -110,7 +117,7 @@
 
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level,
-				     bool valid)
+				     bool valid, u32 unused)
 {
 	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -132,7 +139,7 @@
 
 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level,
-				     bool valid)
+				     bool valid, u32 unused)
 {
 	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -156,7 +163,7 @@
 
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level,
-				     bool valid)
+				     bool valid, u32 flags)
 {
 	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -164,7 +171,8 @@
 	/* Mark the page as writeable.  Other platforms don't have a
 	 * setting for read-only/writable, so this matches that behavior.
 	 */
-	pte |= BYT_PTE_WRITEABLE;
+	if (!(flags & PTE_READ_ONLY))
+		pte |= BYT_PTE_WRITEABLE;
 
 	if (level != I915_CACHE_NONE)
 		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
@@ -174,7 +182,7 @@
 
 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level,
-				     bool valid)
+				     bool valid, u32 unused)
 {
 	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -187,7 +195,7 @@
 
 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 				      enum i915_cache_level level,
-				      bool valid)
+				      bool valid, u32 unused)
 {
 	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -301,7 +309,7 @@
 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 				      struct sg_table *pages,
 				      uint64_t start,
-				      enum i915_cache_level cache_level)
+				      enum i915_cache_level cache_level, u32 unused)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
@@ -639,7 +647,7 @@
 	uint32_t pd_entry;
 	int pte, pde;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
 	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
 		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
@@ -941,7 +949,7 @@
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
@@ -964,7 +972,7 @@
 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 				      struct sg_table *pages,
 				      uint64_t start,
-				      enum i915_cache_level cache_level)
+				      enum i915_cache_level cache_level, u32 flags)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
@@ -981,7 +989,8 @@
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
-				       cache_level, true);
+				       cache_level, true, flags);
+
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
 			kunmap_atomic(pt_vaddr);
 			pt_vaddr = NULL;
@@ -1218,8 +1227,12 @@
 	       enum i915_cache_level cache_level,
 	       u32 flags)
 {
+	/* Currently applicable only to VLV */
+	if (vma->obj->gt_ro)
+		flags |= PTE_READ_ONLY;
+
 	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
-				cache_level);
+				cache_level, flags);
 }
 
 static void ppgtt_unbind_vma(struct i915_vma *vma)
@@ -1394,7 +1407,7 @@
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     uint64_t start,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level, u32 unused)
 {
 	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	unsigned first_entry = start >> PAGE_SHIFT;
@@ -1440,7 +1453,7 @@
 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     uint64_t start,
-				     enum i915_cache_level level)
+				     enum i915_cache_level level, u32 flags)
 {
 	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	unsigned first_entry = start >> PAGE_SHIFT;
@@ -1452,7 +1465,7 @@
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
+		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
 		i++;
 	}
 
@@ -1464,7 +1477,7 @@
 	 */
 	if (i != 0)
 		WARN_ON(readl(&gtt_entries[i-1]) !=
-			vm->pte_encode(addr, level, true));
+			vm->pte_encode(addr, level, true, flags));
 
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
@@ -1518,7 +1531,7 @@
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -1567,6 +1580,10 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = vma->obj;
 
+	/* Currently applicable only to VLV */
+	if (obj->gt_ro)
+		flags |= PTE_READ_ONLY;
+
 	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
 	 * or we have a global mapping already but the cacheability flags have
 	 * changed, set the global PTEs.
@@ -1583,7 +1600,7 @@
 		    (cache_level != obj->cache_level)) {
 			vma->vm->insert_entries(vma->vm, obj->pages,
 						vma->node.start,
-						cache_level);
+						cache_level, flags);
 			obj->has_global_gtt_mapping = 1;
 		}
 	}
@@ -1595,7 +1612,7 @@
 		appgtt->base.insert_entries(&appgtt->base,
 					    vma->obj->pages,
 					    vma->node.start,
-					    cache_level);
+					    cache_level, flags);
 		vma->obj->has_aliasing_ppgtt_mapping = 1;
 	}
 }
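
The new u32 flags argument threads per-vma bits such as PTE_READ_ONLY from bind_vma all the way into pte_encode. A minimal standalone sketch of that plumbing (plain C; the bit positions and names are invented for illustration, not VLV's real PTE layout):

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PTE_VALID	(1ull << 0)
#define SKETCH_PTE_READ_ONLY	(1ull << 1)	/* stands in for PTE_READ_ONLY */

static uint64_t sketch_pte_encode(uint64_t addr, bool valid, uint32_t flags)
{
	uint64_t pte = addr & ~0xfffull;	/* keep page-aligned address bits */

	if (valid)
		pte |= SKETCH_PTE_VALID;
	if (flags & (1u << 1))			/* caller set the read-only flag */
		pte |= SKETCH_PTE_READ_ONLY;	/* hw would then refuse GPU writes */
	return pte;
}
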
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1b96a06..8d6f7c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -154,6 +154,7 @@
 	void (*unbind_vma)(struct i915_vma *vma);
 	/* Map an object into an address space with the given cache flags. */
 #define GLOBAL_BIND (1<<0)
+#define PTE_READ_ONLY (1<<1)
 	void (*bind_vma)(struct i915_vma *vma,
 			 enum i915_cache_level cache_level,
 			 u32 flags);
@@ -197,7 +198,7 @@
 	/* FIXME: Need a more generic return type */
 	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
 				     enum i915_cache_level level,
-				     bool valid); /* Create a valid PTE */
+				     bool valid, u32 flags); /* Create a valid PTE */
 	void (*clear_range)(struct i915_address_space *vm,
 			    uint64_t start,
 			    uint64_t length,
@@ -205,7 +206,7 @@
 	void (*insert_entries)(struct i915_address_space *vm,
 			       struct sg_table *st,
 			       uint64_t start,
-			       enum i915_cache_level cache_level);
+			       enum i915_cache_level cache_level, u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
 };
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 34894b5..e60be3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,64 +28,13 @@
 #include "i915_drv.h"
 #include "intel_renderstate.h"
 
-struct i915_render_state {
+struct render_state {
+	const struct intel_renderstate_rodata *rodata;
 	struct drm_i915_gem_object *obj;
-	unsigned long ggtt_offset;
-	u32 *batch;
-	u32 size;
-	u32 len;
+	u64 ggtt_offset;
+	int gen;
 };
 
-static struct i915_render_state *render_state_alloc(struct drm_device *dev)
-{
-	struct i915_render_state *so;
-	struct page *page;
-	int ret;
-
-	so = kzalloc(sizeof(*so), GFP_KERNEL);
-	if (!so)
-		return ERR_PTR(-ENOMEM);
-
-	so->obj = i915_gem_alloc_object(dev, 4096);
-	if (so->obj == NULL) {
-		ret = -ENOMEM;
-		goto free;
-	}
-	so->size = 4096;
-
-	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
-	if (ret)
-		goto free_gem;
-
-	BUG_ON(so->obj->pages->nents != 1);
-	page = sg_page(so->obj->pages->sgl);
-
-	so->batch = kmap(page);
-	if (!so->batch) {
-		ret = -ENOMEM;
-		goto unpin;
-	}
-
-	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-
-	return so;
-unpin:
-	i915_gem_object_ggtt_unpin(so->obj);
-free_gem:
-	drm_gem_object_unreference(&so->obj->base);
-free:
-	kfree(so);
-	return ERR_PTR(ret);
-}
-
-static void render_state_free(struct i915_render_state *so)
-{
-	kunmap(kmap_to_page(so->batch));
-	i915_gem_object_ggtt_unpin(so->obj);
-	drm_gem_object_unreference(&so->obj->base);
-	kfree(so);
-}
-
 static const struct intel_renderstate_rodata *
 render_state_get_rodata(struct drm_device *dev, const int gen)
 {
@@ -101,98 +50,120 @@
 	return NULL;
 }
 
-static int render_state_setup(const int gen,
-			      const struct intel_renderstate_rodata *rodata,
-			      struct i915_render_state *so)
+static int render_state_init(struct render_state *so, struct drm_device *dev)
 {
-	const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
-	u32 reloc_index = 0;
-	u32 * const d = so->batch;
-	unsigned int i = 0;
 	int ret;
 
-	if (!rodata || rodata->batch_items * 4 > so->size)
+	so->gen = INTEL_INFO(dev)->gen;
+	so->rodata = render_state_get_rodata(dev, so->gen);
+	if (so->rodata == NULL)
+		return 0;
+
+	if (so->rodata->batch_items * 4 > 4096)
 		return -EINVAL;
 
+	so->obj = i915_gem_alloc_object(dev, 4096);
+	if (so->obj == NULL)
+		return -ENOMEM;
+
+	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+	if (ret)
+		goto free_gem;
+
+	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+	return 0;
+
+free_gem:
+	drm_gem_object_unreference(&so->obj->base);
+	return ret;
+}
+
+static int render_state_setup(struct render_state *so)
+{
+	const struct intel_renderstate_rodata *rodata = so->rodata;
+	unsigned int i = 0, reloc_index = 0;
+	struct page *page;
+	u32 *d;
+	int ret;
+
 	ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
 	if (ret)
 		return ret;
 
+	page = sg_page(so->obj->pages->sgl);
+	d = kmap(page);
+
 	while (i < rodata->batch_items) {
 		u32 s = rodata->batch[i];
 
-		if (reloc_index < rodata->reloc_items &&
-		    i * 4  == rodata->reloc[reloc_index]) {
-
-			s += goffset & 0xffffffff;
-
-			/* We keep batch offsets max 32bit */
-			if (gen >= 8) {
+		if (i * 4  == rodata->reloc[reloc_index]) {
+			u64 r = s + so->ggtt_offset;
+			s = lower_32_bits(r);
+			if (so->gen >= 8) {
 				if (i + 1 >= rodata->batch_items ||
 				    rodata->batch[i + 1] != 0)
 					return -EINVAL;
 
-				d[i] = s;
-				i++;
-				s = (goffset & 0xffffffff00000000ull) >> 32;
+				d[i++] = s;
+				s = upper_32_bits(r);
 			}
 
 			reloc_index++;
 		}
 
-		d[i] = s;
-		i++;
+		d[i++] = s;
 	}
+	kunmap(page);
 
 	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
 	if (ret)
 		return ret;
 
-	if (rodata->reloc_items != reloc_index) {
-		DRM_ERROR("not all relocs resolved, %d out of %d\n",
-			  reloc_index, rodata->reloc_items);
+	if (rodata->reloc[reloc_index] != -1) {
+		DRM_ERROR("only %d relocs resolved\n", reloc_index);
 		return -EINVAL;
 	}
 
-	so->len = rodata->batch_items * 4;
-
 	return 0;
 }
 
+static void render_state_fini(struct render_state *so)
+{
+	i915_gem_object_ggtt_unpin(so->obj);
+	drm_gem_object_unreference(&so->obj->base);
+}
+
 int i915_gem_render_state_init(struct intel_engine_cs *ring)
 {
-	const int gen = INTEL_INFO(ring->dev)->gen;
-	struct i915_render_state *so;
-	const struct intel_renderstate_rodata *rodata;
+	struct render_state so;
 	int ret;
 
 	if (WARN_ON(ring->id != RCS))
 		return -ENOENT;
 
-	rodata = render_state_get_rodata(ring->dev, gen);
-	if (rodata == NULL)
+	ret = render_state_init(&so, ring->dev);
+	if (ret)
+		return ret;
+
+	if (so.rodata == NULL)
 		return 0;
 
-	so = render_state_alloc(ring->dev);
-	if (IS_ERR(so))
-		return PTR_ERR(so);
-
-	ret = render_state_setup(gen, rodata, so);
+	ret = render_state_setup(&so);
 	if (ret)
 		goto out;
 
 	ret = ring->dispatch_execbuffer(ring,
-					i915_gem_obj_ggtt_offset(so->obj),
-					so->len,
+					so.ggtt_offset,
+					so.rodata->batch_items * 4,
 					I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-	ret = __i915_add_request(ring, NULL, so->obj, NULL);
+	ret = __i915_add_request(ring, NULL, so.obj, NULL);
 	/* __i915_add_request moves object to inactive if it fails */
 out:
-	render_state_free(so);
+	render_state_fini(&so);
 	return ret;
 }
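
On gen8+ each relocation in the golden render state consumes two dwords, low half first, as the loop above shows with lower_32_bits()/upper_32_bits(). A compact sketch of that split (plain C; emit_reloc64 and the sk_* macros are invented helpers mirroring the kernel ones):

#include <stdint.h>

#define sk_lower_32_bits(n) ((uint32_t)(n))
#define sk_upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))

/* Patch one batch value with the object's GGTT address, 64-bit style. */
static void emit_reloc64(uint32_t *d, unsigned int *i,
			 uint32_t batch_val, uint64_t ggtt_offset)
{
	uint64_t r = batch_val + ggtt_offset;

	d[(*i)++] = sk_lower_32_bits(r);	/* dword 0: low half */
	d[(*i)++] = sk_upper_32_bits(r);	/* dword 1: high half, 0 in rodata */
}
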
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 7465ab0..21c025a 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -147,30 +147,68 @@
 	return base;
 }
 
-static int i915_setup_compression(struct drm_device *dev, int size)
+static int find_compression_threshold(struct drm_device *dev,
+				      struct drm_mm_node *node,
+				      int size,
+				      int fb_cpp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+	int compression_threshold = 1;
 	int ret;
 
-	compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
-	if (!compressed_fb)
-		goto err_llb;
+	/* HACK: This code depends on what we will do in *_enable_fbc. If that
+	 * code changes, this code needs to change as well.
+	 *
+	 * The enable_fbc code will attempt to use one of our 2 compression
+	 * thresholds; in that case we have only one smaller size left to
+	 * fall back to.
+	 */
 
-	/* Try to over-allocate to reduce reallocations and fragmentation */
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+	/* Try to over-allocate to reduce reallocations and fragmentation. */
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
 				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
-	if (ret)
-		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
-					 size >>= 1, 4096,
-					 DRM_MM_SEARCH_DEFAULT);
-	if (ret)
+	if (ret == 0)
+		return compression_threshold;
+
+again:
+	/* HW's ability to limit the CFB is 1:4 */
+	if (compression_threshold > 4 ||
+	    (fb_cpp == 2 && compression_threshold == 2))
+		return 0;
+
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
+				 size >>= 1, 4096,
+				 DRM_MM_SEARCH_DEFAULT);
+	if (ret && INTEL_INFO(dev)->gen <= 4) {
+		return 0;
+	} else if (ret) {
+		compression_threshold <<= 1;
+		goto again;
+	} else {
+		return compression_threshold;
+	}
+}
+
+static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int ret;
+
+	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
+					 size, fb_cpp);
+	if (!ret)
 		goto err_llb;
+	else if (ret > 1)
+		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. Try to increase stolen memory size if available in BIOS.\n");
+
+	dev_priv->fbc.threshold = ret;
 
 	if (HAS_PCH_SPLIT(dev))
-		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
 	else if (IS_GM45(dev)) {
-		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
 	} else {
 		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
 		if (!compressed_llb)
@@ -184,13 +222,12 @@
 		dev_priv->fbc.compressed_llb = compressed_llb;
 
 		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->mm.stolen_base + compressed_fb->start);
+			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
 		I915_WRITE(FBC_LL_BASE,
 			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
-	dev_priv->fbc.compressed_fb = compressed_fb;
-	dev_priv->fbc.size = size;
+	dev_priv->fbc.size = size / dev_priv->fbc.threshold;
 
 	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
 		      size);
@@ -199,14 +236,13 @@
 
 err_fb:
 	kfree(compressed_llb);
-	drm_mm_remove_node(compressed_fb);
+	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
 err_llb:
-	kfree(compressed_fb);
 	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
 	return -ENOSPC;
 }
 
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -219,7 +255,7 @@
 	/* Release any current block */
 	i915_gem_stolen_cleanup_compression(dev);
 
-	return i915_setup_compression(dev, size);
+	return i915_setup_compression(dev, size, fb_cpp);
 }
 
 void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
@@ -229,10 +265,7 @@
 	if (dev_priv->fbc.size == 0)
 		return;
 
-	if (dev_priv->fbc.compressed_fb) {
-		drm_mm_remove_node(dev_priv->fbc.compressed_fb);
-		kfree(dev_priv->fbc.compressed_fb);
-	}
+	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
 
 	if (dev_priv->fbc.compressed_llb) {
 		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
@@ -336,9 +369,20 @@
 	kfree(obj->pages);
 }
 
+static void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+	if (obj->stolen) {
+		drm_mm_remove_node(obj->stolen);
+		kfree(obj->stolen);
+		obj->stolen = NULL;
+	}
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
 	.get_pages = i915_gem_object_get_pages_stolen,
 	.put_pages = i915_gem_object_put_pages_stolen,
+	.release = i915_gem_object_release_stolen,
 };
 
 static struct drm_i915_gem_object *
@@ -496,13 +540,3 @@
 	drm_gem_object_unreference(&obj->base);
 	return NULL;
 }
-
-void
-i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
-{
-	if (obj->stolen) {
-		drm_mm_remove_node(obj->stolen);
-		kfree(obj->stolen);
-		obj->stolen = NULL;
-	}
-}
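
find_compression_threshold() above halves the CFB size and doubles the compression ratio until an allocation succeeds, capped at 1:4 (1:2 for 16bpp). A standalone sketch of the same search (plain C; try_alloc() stands in for drm_mm_insert_node(), and the gen<=4 give-up-after-one-retry case is omitted):

static int sketch_find_threshold(int size, int fb_cpp,
				 int (*try_alloc)(int size))
{
	int threshold = 1;

	size <<= 1;				/* over-allocate first */
	if (try_alloc(size) == 0)
		return threshold;

	for (;;) {
		/* HW can limit the CFB to at most 1:4 (1:2 at 16bpp) */
		if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
			return 0;
		size >>= 1;			/* halve, i.e. double the ratio */
		if (try_alloc(size) == 0)
			return threshold;
		threshold <<= 1;
	}
}
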
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 21ea928..fe69fc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -40,19 +40,87 @@
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root objects;
+	struct list_head linear;
 	struct drm_device *dev;
 	struct mm_struct *mm;
 	struct work_struct work;
 	unsigned long count;
 	unsigned long serial;
+	bool has_linear;
 };
 
 struct i915_mmu_object {
 	struct i915_mmu_notifier *mmu;
 	struct interval_tree_node it;
+	struct list_head link;
 	struct drm_i915_gem_object *obj;
+	bool is_linear;
 };
 
+static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	unsigned long end;
+
+	mutex_lock(&dev->struct_mutex);
+	/* Cancel any active worker and force us to re-evaluate gup */
+	obj->userptr.work = NULL;
+
+	if (obj->pages != NULL) {
+		struct drm_i915_private *dev_priv = to_i915(dev);
+		struct i915_vma *vma, *tmp;
+		bool was_interruptible;
+
+		was_interruptible = dev_priv->mm.interruptible;
+		dev_priv->mm.interruptible = false;
+
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+			int ret = i915_vma_unbind(vma);
+			WARN_ON(ret && ret != -EIO);
+		}
+		WARN_ON(i915_gem_object_put_pages(obj));
+
+		dev_priv->mm.interruptible = was_interruptible;
+	}
+
+	end = obj->userptr.ptr + obj->base.size;
+
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+
+	return end;
+}
+
+static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
+				      struct mm_struct *mm,
+				      unsigned long start,
+				      unsigned long end)
+{
+	struct i915_mmu_object *mmu;
+	unsigned long serial;
+
+restart:
+	serial = mn->serial;
+	list_for_each_entry(mmu, &mn->linear, link) {
+		struct drm_i915_gem_object *obj;
+
+		if (mmu->it.last < start || mmu->it.start > end)
+			continue;
+
+		obj = mmu->obj;
+		drm_gem_object_reference(&obj->base);
+		spin_unlock(&mn->lock);
+
+		cancel_userptr(obj);
+
+		spin_lock(&mn->lock);
+		if (serial != mn->serial)
+			goto restart;
+	}
+
+	return NULL;
+}
+
 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 						       struct mm_struct *mm,
 						       unsigned long start,
@@ -60,16 +128,18 @@
 {
 	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
 	struct interval_tree_node *it = NULL;
+	unsigned long next = start;
 	unsigned long serial = 0;
 
 	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
-	while (start < end) {
-		struct drm_i915_gem_object *obj;
+	while (next < end) {
+		struct drm_i915_gem_object *obj = NULL;
 
-		obj = NULL;
 		spin_lock(&mn->lock);
-		if (serial == mn->serial)
-			it = interval_tree_iter_next(it, start, end);
+		if (mn->has_linear)
+			it = invalidate_range__linear(mn, mm, start, end);
+		else if (serial == mn->serial)
+			it = interval_tree_iter_next(it, next, end);
 		else
 			it = interval_tree_iter_first(&mn->objects, start, end);
 		if (it != NULL) {
@@ -81,31 +151,7 @@
 		if (obj == NULL)
 			return;
 
-		mutex_lock(&mn->dev->struct_mutex);
-		/* Cancel any active worker and force us to re-evaluate gup */
-		obj->userptr.work = NULL;
-
-		if (obj->pages != NULL) {
-			struct drm_i915_private *dev_priv = to_i915(mn->dev);
-			struct i915_vma *vma, *tmp;
-			bool was_interruptible;
-
-			was_interruptible = dev_priv->mm.interruptible;
-			dev_priv->mm.interruptible = false;
-
-			list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
-				int ret = i915_vma_unbind(vma);
-				WARN_ON(ret && ret != -EIO);
-			}
-			WARN_ON(i915_gem_object_put_pages(obj));
-
-			dev_priv->mm.interruptible = was_interruptible;
-		}
-
-		start = obj->userptr.ptr + obj->base.size;
-
-		drm_gem_object_unreference(&obj->base);
-		mutex_unlock(&mn->dev->struct_mutex);
+		next = cancel_userptr(obj);
 	}
 }
 
@@ -150,7 +196,9 @@
 	mmu->mm = mm;
 	mmu->objects = RB_ROOT;
 	mmu->count = 0;
-	mmu->serial = 0;
+	mmu->serial = 1;
+	INIT_LIST_HEAD(&mmu->linear);
+	mmu->has_linear = false;
 
 	/* Protected by mmap_sem (write-lock) */
 	ret = __mmu_notifier_register(&mmu->mn, mm);
@@ -197,6 +245,17 @@
 		mmu->serial = 1;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
+{
+	struct i915_mmu_object *mn;
+
+	list_for_each_entry(mn, &mmu->linear, link)
+		if (mn->is_linear)
+			return true;
+
+	return false;
+}
+
 static void
 i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
 		      struct i915_mmu_object *mn)
@@ -204,7 +263,11 @@
 	lockdep_assert_held(&mmu->dev->struct_mutex);
 
 	spin_lock(&mmu->lock);
-	interval_tree_remove(&mn->it, &mmu->objects);
+	list_del(&mn->link);
+	if (mn->is_linear)
+		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
+	else
+		interval_tree_remove(&mn->it, &mmu->objects);
 	__i915_mmu_notifier_update_serial(mmu);
 	spin_unlock(&mmu->lock);
 
@@ -230,7 +293,6 @@
 	 */
 	i915_gem_retire_requests(mmu->dev);
 
-	/* Disallow overlapping userptr objects */
 	spin_lock(&mmu->lock);
 	it = interval_tree_iter_first(&mmu->objects,
 				      mn->it.start, mn->it.last);
@@ -243,14 +305,22 @@
 		 * to flush their object references upon which the object will
 		 * be removed from the interval-tree, or the range is
 		 * still in use by another client and the overlap is invalid.
+		 *
+		 * If we do have an overlap, we cannot use the interval tree
+		 * for fast range invalidation.
 		 */
 
 		obj = container_of(it, struct i915_mmu_object, it)->obj;
-		ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
-	} else {
+		if (!obj->userptr.workers)
+			mmu->has_linear = mn->is_linear = true;
+		else
+			ret = -EAGAIN;
+	} else
 		interval_tree_insert(&mn->it, &mmu->objects);
+
+	if (ret == 0) {
+		list_add(&mn->link, &mmu->linear);
 		__i915_mmu_notifier_update_serial(mmu);
-		ret = 0;
 	}
 	spin_unlock(&mmu->lock);
 	mutex_unlock(&mmu->dev->struct_mutex);
@@ -611,12 +681,11 @@
  * We impose several restrictions upon the memory being mapped
  * into the GPU.
  * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
- * 2. It cannot overlap any other userptr object in the same address space.
- * 3. It must be normal system memory, not a pointer into another map of IO
+ * 2. It must be normal system memory, not a pointer into another map of IO
  *    space (e.g. it must not be a GTT mmapping of another object).
- * 4. We only allow a bo as large as we could in theory map into the GTT,
+ * 3. We only allow a bo as large as we could in theory map into the GTT,
  *    that is we limit the size to the total size of the GTT.
- * 5. The bo is marked as being snoopable. The backing pages are left
+ * 4. The bo is marked as being snoopable. The backing pages are left
  *    accessible directly by the CPU, but reads and writes by the GPU may
  *    incur the cost of a snoop (unless you have an LLC architecture).
  *
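
The linear fallback walks every tracked object and keeps those whose inclusive [start, last] interval touches the invalidated range; the end-- at the top of the handler converts the notifier's exclusive end to match. The test reduces to (plain C sketch, invented names):

#include <stdbool.h>

static bool ranges_overlap(unsigned long it_start, unsigned long it_last,
			   unsigned long inv_start, unsigned long inv_end)
{
	inv_end--;	/* exclusive end -> inclusive, as in the handler */
	return !(it_last < inv_start || it_start > inv_end);
}
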
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 66cf417..0b3f694 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -327,6 +327,7 @@
 	struct drm_device *dev = error_priv->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error = error_priv->error;
+	struct drm_i915_error_object *obj;
 	int i, j, offset, elt;
 	int max_hangcheck_score;
 
@@ -395,8 +396,6 @@
 				    error->pinned_bo_count[0]);
 
 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		struct drm_i915_error_object *obj;
-
 		obj = error->ring[i].batchbuffer;
 		if (obj) {
 			err_puts(m, dev_priv->ring[i].name);
@@ -459,6 +458,18 @@
 		}
 	}
 
+	if ((obj = error->semaphore_obj)) {
+		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
+		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+				   elt * 4,
+				   obj->pages[0][elt],
+				   obj->pages[0][elt+1],
+				   obj->pages[0][elt+2],
+				   obj->pages[0][elt+3]);
+		}
+	}
+
 	if (error->overlay)
 		intel_overlay_print_error_state(m, error->overlay);
 
@@ -529,6 +540,7 @@
 		kfree(error->ring[i].requests);
 	}
 
+	i915_error_object_free(error->semaphore_obj);
 	kfree(error->active_bo);
 	kfree(error->overlay);
 	kfree(error->display);
@@ -746,7 +758,59 @@
 	}
 }
 
+
+static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
+					struct drm_i915_error_state *error,
+					struct intel_engine_cs *ring,
+					struct drm_i915_error_ring *ering)
+{
+	struct intel_engine_cs *to;
+	int i;
+
+	if (!i915_semaphore_is_enabled(dev_priv->dev))
+		return;
+
+	if (!error->semaphore_obj)
+		error->semaphore_obj =
+			i915_error_object_create(dev_priv,
+						 dev_priv->semaphore_obj,
+						 &dev_priv->gtt.base);
+
+	for_each_ring(to, dev_priv, i) {
+		int idx;
+		u16 signal_offset;
+		u32 *tmp;
+
+		if (ring == to)
+			continue;
+
+		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1)) / 4;
+		tmp = error->semaphore_obj->pages[0];
+		idx = intel_ring_sync_index(ring, to);
+
+		ering->semaphore_mboxes[idx] = tmp[signal_offset];
+		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+	}
+}
+
+static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
+					struct intel_engine_cs *ring,
+					struct drm_i915_error_ring *ering)
+{
+	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
+	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
+	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+
+	if (HAS_VEBOX(dev_priv->dev)) {
+		ering->semaphore_mboxes[2] =
+			I915_READ(RING_SYNC_2(ring->mmio_base));
+		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+	}
+}
+
 static void i915_record_ring_state(struct drm_device *dev,
+				   struct drm_i915_error_state *error,
 				   struct intel_engine_cs *ring,
 				   struct drm_i915_error_ring *ering)
 {
@@ -755,18 +819,10 @@
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
 		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
-		ering->semaphore_mboxes[0]
-			= I915_READ(RING_SYNC_0(ring->mmio_base));
-		ering->semaphore_mboxes[1]
-			= I915_READ(RING_SYNC_1(ring->mmio_base));
-		ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
-		ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
-	}
-
-	if (HAS_VEBOX(dev)) {
-		ering->semaphore_mboxes[2] =
-			I915_READ(RING_SYNC_2(ring->mmio_base));
-		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+		if (INTEL_INFO(dev)->gen >= 8)
+			gen8_record_semaphore_state(dev_priv, error, ring, ering);
+		else
+			gen6_record_semaphore_state(dev_priv, ring, ering);
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -871,6 +927,9 @@
 		return;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		if (!i915_gem_obj_ggtt_bound(obj))
+			continue;
+
 		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
 			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
 			break;
@@ -895,7 +954,7 @@
 
 		error->ring[i].valid = true;
 
-		i915_record_ring_state(dev, ring, &error->ring[i]);
+		i915_record_ring_state(dev, error, ring, &error->ring[i]);
 
 		request = i915_gem_find_active_request(ring);
 		if (request) {
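
gen8_record_semaphore_state() indexes the captured semaphore page by the dword offset of each signal slot, i.e. the low page bits of the GGTT offset divided by four — which is why the mask must be the intra-page mask (PAGE_SIZE - 1), not PAGE_MASK. In isolation (plain C sketch; SK_PAGE_SIZE stands in for PAGE_SIZE):

#include <stdint.h>

#define SK_PAGE_SIZE 4096u

static uint32_t signal_dword_index(uint64_t signal_ggtt_offset)
{
	/* offset within the page, in dwords */
	return (uint32_t)((signal_ggtt_offset & (SK_PAGE_SIZE - 1)) / 4);
}
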
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c05c84f..6ef9d6f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,7 +136,7 @@
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON(dev_priv->pm.irqs_disabled))
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
 
 	if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON(dev_priv->pm.irqs_disabled))
+	if (!intel_irqs_enabled(dev_priv))
 		return;
 
 	if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@
 {
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON(dev_priv->pm.irqs_disabled))
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
 
 	dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@
 	POSTING_READ(GTIMR);
 }
 
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 	ilk_update_gt_irq(dev_priv, mask, mask);
 }
 
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 	ilk_update_gt_irq(dev_priv, mask, 0);
 }
@@ -206,7 +206,7 @@
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON(dev_priv->pm.irqs_disabled))
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
 
 	new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@
 	}
 }
 
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 	snb_update_pm_irq(dev_priv, mask, mask);
 }
 
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 	snb_update_pm_irq(dev_priv, mask, 0);
 }
@@ -264,7 +264,7 @@
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON(dev_priv->pm.irqs_disabled))
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
 
 	new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@
 	}
 }
 
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 	bdw_update_pm_irq(dev_priv, mask, mask);
 }
 
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 	bdw_update_pm_irq(dev_priv, mask, 0);
 }
@@ -420,7 +420,7 @@
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON(dev_priv->pm.irqs_disabled))
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
 
 	I915_WRITE(SDEIMR, sdeimr);
@@ -1090,6 +1090,53 @@
 	return true;
 }
 
+static void i915_digport_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, dig_port_work);
+	unsigned long irqflags;
+	u32 long_port_mask, short_port_mask;
+	struct intel_digital_port *intel_dig_port;
+	int i, ret;
+	u32 old_bits = 0;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	long_port_mask = dev_priv->long_hpd_port_mask;
+	dev_priv->long_hpd_port_mask = 0;
+	short_port_mask = dev_priv->short_hpd_port_mask;
+	dev_priv->short_hpd_port_mask = 0;
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		bool valid = false;
+		bool long_hpd = false;
+		intel_dig_port = dev_priv->hpd_irq_port[i];
+		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+			continue;
+
+		if (long_port_mask & (1 << i))  {
+			valid = true;
+			long_hpd = true;
+		} else if (short_port_mask & (1 << i))
+			valid = true;
+
+		if (valid) {
+			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+			if (ret == true) {
+				/* if hpd_pulse returns true, fall back to old-school hpd */
+				old_bits |= (1 << intel_dig_port->base.hpd_pin);
+			}
+		}
+	}
+
+	if (old_bits) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		dev_priv->hpd_event_bits |= old_bits;
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		schedule_work(&dev_priv->hotplug_work);
+	}
+}
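
i915_digport_work_func() uses the usual latch-under-lock shape: snapshot and clear the shared masks while holding the lock, then do the slow per-port work without it so the IRQ handler can keep accumulating bits. Reduced to its skeleton (plain C with pthreads; all names invented):

#include <pthread.h>
#include <stdint.h>

struct sk_hpd {
	pthread_mutex_t lock;
	uint32_t long_mask, short_mask;	/* filled from the IRQ path */
};

static void sk_digport_work(struct sk_hpd *s,
			    void (*handle)(int port, int long_hpd))
{
	uint32_t long_mask, short_mask;
	int i;

	pthread_mutex_lock(&s->lock);
	long_mask = s->long_mask;	s->long_mask = 0;	/* snapshot + clear */
	short_mask = s->short_mask;	s->short_mask = 0;
	pthread_mutex_unlock(&s->lock);

	for (i = 0; i < 32; i++) {	/* slow work, lock dropped */
		if (long_mask & (1u << i))
			handle(i, 1);
		else if (short_mask & (1u << i))
			handle(i, 0);
	}
}
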
+
 /*
  * Handle hotplug events outside the interrupt handler proper.
  */
@@ -1109,10 +1156,6 @@
 	bool changed = false;
 	u32 hpd_event_bits;
 
-	/* HPD irq before everything is fully set up. */
-	if (!dev_priv->enable_hotplug_processing)
-		return;
-
 	mutex_lock(&mode_config->mutex);
 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
@@ -1122,6 +1165,8 @@
 	dev_priv->hpd_event_bits = 0;
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		intel_connector = to_intel_connector(connector);
+		if (!intel_connector->encoder)
+			continue;
 		intel_encoder = intel_connector->encoder;
 		if (intel_encoder->hpd_pin > HPD_NONE &&
 		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
@@ -1152,6 +1197,8 @@
 
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		intel_connector = to_intel_connector(connector);
+		if (!intel_connector->encoder)
+			continue;
 		intel_encoder = intel_connector->encoder;
 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
 			if (intel_encoder->hot_plug)
@@ -1218,10 +1265,138 @@
 
 	trace_i915_gem_request_complete(ring);
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		intel_notify_mmio_flip(ring);
+
 	wake_up_all(&ring->irq_queue);
 	i915_queue_hangcheck(dev);
 }
 
+static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
+			    struct intel_rps_ei *rps_ei)
+{
+	u32 cz_ts, cz_freq_khz;
+	u32 render_count, media_count;
+	u32 elapsed_render, elapsed_media, elapsed_time;
+	u32 residency = 0;
+
+	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
+
+	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
+	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
+
+	if (rps_ei->cz_clock == 0) {
+		rps_ei->cz_clock = cz_ts;
+		rps_ei->render_c0 = render_count;
+		rps_ei->media_c0 = media_count;
+
+		return dev_priv->rps.cur_freq;
+	}
+
+	elapsed_time = cz_ts - rps_ei->cz_clock;
+	rps_ei->cz_clock = cz_ts;
+
+	elapsed_render = render_count - rps_ei->render_c0;
+	rps_ei->render_c0 = render_count;
+
+	elapsed_media = media_count - rps_ei->media_c0;
+	rps_ei->media_c0 = media_count;
+
+	/* Convert all the counters into a common unit (milliseconds) */
+	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
+	elapsed_render /= cz_freq_khz;
+	elapsed_media /= cz_freq_khz;
+
+	/*
+	 * Calculate overall C0 residency percentage
+	 * only if the elapsed time is non-zero
+	 */
+	if (elapsed_time)
+		residency = max(elapsed_render, elapsed_media) * 100 /
+			    elapsed_time;
+
+	return residency;
+}
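
With all three counters normalised to milliseconds, the residency is just the busier of the two wells as a share of wall time. A worked example with made-up numbers (plain C):

#include <stdio.h>

int main(void)
{
	unsigned int elapsed_time = 10;		/* ms of wall time this EI */
	unsigned int elapsed_render = 7;	/* ms the render well spent in C0 */
	unsigned int elapsed_media = 4;		/* ms the media well spent in C0 */
	unsigned int residency = 0;

	if (elapsed_time)
		residency = (elapsed_render > elapsed_media ?
			     elapsed_render : elapsed_media) * 100 / elapsed_time;

	printf("C0 residency: %u%%\n", residency);	/* 70%: above a typical up threshold */
	return 0;
}
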
+
+/**
+ * vlv_calc_delay_from_C0_counters - increase/decrease freq based on GPU
+ * busyness calculated from the C0 counters of the render & media power wells
+ * @dev_priv: DRM device private
+ */
+static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+{
+	u32 residency_C0_up = 0, residency_C0_down = 0;
+	u8 new_delay, adj;
+
+	dev_priv->rps.ei_interrupt_count++;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (dev_priv->rps.up_ei.cz_clock == 0) {
+		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
+		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
+		return dev_priv->rps.cur_freq;
+	}
+
+	/*
+	 * To down-throttle, C0 residency should stay below the down
+	 * threshold for continuous EI intervals, so evaluate the down EI
+	 * counters only once every VLV_INT_COUNT_FOR_DOWN_EI interrupts.
+	 */
+	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+		dev_priv->rps.ei_interrupt_count = 0;
+
+		residency_C0_down = vlv_c0_residency(dev_priv,
+						     &dev_priv->rps.down_ei);
+	} else {
+		residency_C0_up = vlv_c0_residency(dev_priv,
+						   &dev_priv->rps.up_ei);
+	}
+
+	new_delay = dev_priv->rps.cur_freq;
+
+	adj = dev_priv->rps.last_adj;
+	/* C0 residency is greater than UP threshold. Increase Frequency */
+	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
+		if (adj > 0)
+			adj *= 2;
+		else
+			adj = 1;
+
+		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
+			new_delay = dev_priv->rps.cur_freq + adj;
+
+		/*
+		 * For better performance, jump directly
+		 * to RPe if we're below it.
+		 */
+		if (new_delay < dev_priv->rps.efficient_freq)
+			new_delay = dev_priv->rps.efficient_freq;
+
+	} else if (!dev_priv->rps.ei_interrupt_count &&
+			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
+		if (adj < 0)
+			adj *= 2;
+		else
+			adj = -1;
+		/*
+		 * C0 residency stayed below the down threshold over a
+		 * period of VLV_INT_COUNT_FOR_DOWN_EI, so reduce the freq.
+		 */
+		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
+			new_delay = dev_priv->rps.cur_freq + adj;
+	}
+
+	return new_delay;
+}
+
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
@@ -1232,11 +1407,11 @@
 	spin_lock_irq(&dev_priv->irq_lock);
 	pm_iir = dev_priv->rps.pm_iir;
 	dev_priv->rps.pm_iir = 0;
-	if (IS_BROADWELL(dev_priv->dev))
-		bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
+		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	else {
 		/* Make sure not to corrupt PMIMR state used by ringbuffer */
-		snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	}
 	spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -1252,8 +1427,10 @@
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
 		if (adj > 0)
 			adj *= 2;
-		else
-			adj = 1;
+		else {
+			/* CHV needs even encode values */
+			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
+		}
 		new_delay = dev_priv->rps.cur_freq + adj;
 
 		/*
@@ -1268,11 +1445,15 @@
 		else
 			new_delay = dev_priv->rps.min_freq_softlimit;
 		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
 		if (adj < 0)
 			adj *= 2;
-		else
-			adj = -1;
+		else {
+			/* CHV needs even encode values */
+			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
+		}
 		new_delay = dev_priv->rps.cur_freq + adj;
 	} else { /* unknown event */
 		new_delay = dev_priv->rps.cur_freq;
@@ -1372,7 +1553,7 @@
 out:
 	WARN_ON(dev_priv->l3_parity.which_slice);
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1386,7 +1567,7 @@
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
-	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
 	spin_unlock(&dev_priv->irq_lock);
 
 	iir &= GT_PARITY_ERROR(dev);
@@ -1441,7 +1622,7 @@
 
 	spin_lock(&dev_priv->irq_lock);
 	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-	bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
 	spin_unlock(&dev_priv->irq_lock);
 
 	queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1458,6 +1639,7 @@
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
 		tmp = I915_READ(GEN8_GT_IIR(0));
 		if (tmp) {
+			I915_WRITE(GEN8_GT_IIR(0), tmp);
 			ret = IRQ_HANDLED;
 			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
 			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
@@ -1465,7 +1647,6 @@
 				notify_ring(dev, &dev_priv->ring[RCS]);
 			if (bcs & GT_RENDER_USER_INTERRUPT)
 				notify_ring(dev, &dev_priv->ring[BCS]);
-			I915_WRITE(GEN8_GT_IIR(0), tmp);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
@@ -1473,6 +1654,7 @@
 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
 		tmp = I915_READ(GEN8_GT_IIR(1));
 		if (tmp) {
+			I915_WRITE(GEN8_GT_IIR(1), tmp);
 			ret = IRQ_HANDLED;
 			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
 			if (vcs & GT_RENDER_USER_INTERRUPT)
@@ -1480,7 +1662,6 @@
 			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
 			if (vcs & GT_RENDER_USER_INTERRUPT)
 				notify_ring(dev, &dev_priv->ring[VCS2]);
-			I915_WRITE(GEN8_GT_IIR(1), tmp);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
@@ -1488,10 +1669,10 @@
 	if (master_ctl & GEN8_GT_PM_IRQ) {
 		tmp = I915_READ(GEN8_GT_IIR(2));
 		if (tmp & dev_priv->pm_rps_events) {
-			ret = IRQ_HANDLED;
-			gen8_rps_irq_handler(dev_priv, tmp);
 			I915_WRITE(GEN8_GT_IIR(2),
 				   tmp & dev_priv->pm_rps_events);
+			ret = IRQ_HANDLED;
+			gen8_rps_irq_handler(dev_priv, tmp);
 		} else
 			DRM_ERROR("The master control interrupt lied (PM)!\n");
 	}
@@ -1499,11 +1680,11 @@
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
 		tmp = I915_READ(GEN8_GT_IIR(3));
 		if (tmp) {
+			I915_WRITE(GEN8_GT_IIR(3), tmp);
 			ret = IRQ_HANDLED;
 			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
 			if (vcs & GT_RENDER_USER_INTERRUPT)
 				notify_ring(dev, &dev_priv->ring[VECS]);
-			I915_WRITE(GEN8_GT_IIR(3), tmp);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
@@ -1514,23 +1695,104 @@
 #define HPD_STORM_DETECT_PERIOD 1000
 #define HPD_STORM_THRESHOLD 5
 
+static int ilk_port_to_hotplug_shift(enum port port)
+{
+	switch (port) {
+	case PORT_A:
+	case PORT_E:
+	default:
+		return -1;
+	case PORT_B:
+		return 0;
+	case PORT_C:
+		return 8;
+	case PORT_D:
+		return 16;
+	}
+}
+
+static int g4x_port_to_hotplug_shift(enum port port)
+{
+	switch (port) {
+	case PORT_A:
+	case PORT_E:
+	default:
+		return -1;
+	case PORT_B:
+		return 17;
+	case PORT_C:
+		return 19;
+	case PORT_D:
+		return 21;
+	}
+}
+
+static inline enum port get_port_from_pin(enum hpd_pin pin)
+{
+	switch (pin) {
+	case HPD_PORT_B:
+		return PORT_B;
+	case HPD_PORT_C:
+		return PORT_C;
+	case HPD_PORT_D:
+		return PORT_D;
+	default:
+		return PORT_A; /* no hpd */
+	}
+}
+
 static inline void intel_hpd_irq_handler(struct drm_device *dev,
 					 u32 hotplug_trigger,
+					 u32 dig_hotplug_reg,
 					 const u32 *hpd)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
+	enum port port;
 	bool storm_detected = false;
+	bool queue_dig = false, queue_hp = false;
+	u32 dig_shift;
+	u32 dig_port_mask = 0;
 
 	if (!hotplug_trigger)
 		return;
 
-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-			  hotplug_trigger);
+	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
+			 hotplug_trigger, dig_hotplug_reg);
 
 	spin_lock(&dev_priv->irq_lock);
 	for (i = 1; i < HPD_NUM_PINS; i++) {
+		if (!(hpd[i] & hotplug_trigger))
+			continue;
 
+		port = get_port_from_pin(i);
+		if (port && dev_priv->hpd_irq_port[port]) {
+			bool long_hpd;
+
+			if (IS_G4X(dev)) {
+				dig_shift = g4x_port_to_hotplug_shift(port);
+				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+			} else {
+				dig_shift = ilk_port_to_hotplug_shift(port);
+				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+			}
+
+			DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
+			/*
+			 * For long HPD pulses we want the digital queue to
+			 * run, but we still want HPD storm detection to
+			 * function.
+			 */
+			if (long_hpd) {
+				dev_priv->long_hpd_port_mask |= (1 << port);
+				dig_port_mask |= hpd[i];
+			} else {
+				/* for short HPD just trigger the digital queue */
+				dev_priv->short_hpd_port_mask |= (1 << port);
+				hotplug_trigger &= ~hpd[i];
+			}
+			queue_dig = true;
+		}
+	}
+
+	for (i = 1; i < HPD_NUM_PINS; i++) {
 		if (hpd[i] & hotplug_trigger &&
 		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
 			/*
@@ -1550,7 +1812,11 @@
 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
 			continue;
 
-		dev_priv->hpd_event_bits |= (1 << i);
+		if (!(dig_port_mask & hpd[i])) {
+			dev_priv->hpd_event_bits |= (1 << i);
+			queue_hp = true;
+		}
+
 		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
 				   dev_priv->hpd_stats[i].hpd_last_jiffies
 				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -1579,7 +1845,10 @@
 	 * queue for otherwise the flush_work in the pageflip code will
 	 * deadlock.
 	 */
-	schedule_work(&dev_priv->hotplug_work);
+	if (queue_dig)
+		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
+	if (queue_hp)
+		schedule_work(&dev_priv->hotplug_work);
 }
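
Classifying a pulse boils down to shifting the latched hotplug register by a per-port amount and testing the long-detect bit, as intel_hpd_irq_handler() does above. As a sketch (plain C; SK_LONG_DETECT merely stands in for PORTB_HOTPLUG_LONG_DETECT, whose real bit value lives in i915_reg.h):

#include <stdbool.h>
#include <stdint.h>

#define SK_LONG_DETECT (1u << 1)	/* illustrative bit position only */

static bool sk_is_long_pulse(uint32_t dig_hotplug_reg, int dig_shift)
{
	if (dig_shift < 0)		/* port without digital HPD (A/E) */
		return false;
	return (dig_hotplug_reg >> dig_shift) & SK_LONG_DETECT;
}
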
 
 static void gmbus_irq_handler(struct drm_device *dev)
@@ -1700,7 +1969,7 @@
 	if (pm_iir & dev_priv->pm_rps_events) {
 		spin_lock(&dev_priv->irq_lock);
 		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-		snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
 		spin_unlock(&dev_priv->irq_lock);
 
 		queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1809,26 +2078,28 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 
-	if (IS_G4X(dev)) {
-		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+	if (hotplug_status) {
+		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+		/*
+		 * Make sure hotplug status is cleared before we clear IIR, or else we
+		 * may miss hotplug events.
+		 */
+		POSTING_READ(PORT_HOTPLUG_STAT);
 
-		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
-	} else {
-		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+		if (IS_G4X(dev)) {
+			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
-		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
+		} else {
+			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
+		}
+
+		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+			dp_aux_irq_handler(dev);
 	}
-
-	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
-	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
-		dp_aux_irq_handler(dev);
-
-	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-	/*
-	 * Make sure hotplug status is cleared before we clear IIR, or else we
-	 * may miss hotplug events.
-	 */
-	POSTING_READ(PORT_HOTPLUG_STAT);
 }
 
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1839,29 +2110,36 @@
 	irqreturn_t ret = IRQ_NONE;
 
 	while (true) {
-		iir = I915_READ(VLV_IIR);
+		/* Find, clear, then process each source of interrupt */
+
 		gt_iir = I915_READ(GTIIR);
+		if (gt_iir)
+			I915_WRITE(GTIIR, gt_iir);
+
 		pm_iir = I915_READ(GEN6_PMIIR);
+		if (pm_iir)
+			I915_WRITE(GEN6_PMIIR, pm_iir);
+
+		iir = I915_READ(VLV_IIR);
+		if (iir) {
+			/* Consume port before clearing IIR or we'll miss events */
+			if (iir & I915_DISPLAY_PORT_INTERRUPT)
+				i9xx_hpd_irq_handler(dev);
+			I915_WRITE(VLV_IIR, iir);
+		}
 
 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
 			goto out;
 
 		ret = IRQ_HANDLED;
 
-		snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
-		valleyview_pipestat_irq_handler(dev, iir);
-
-		/* Consume port.  Then clear IIR or we'll miss events */
-		if (iir & I915_DISPLAY_PORT_INTERRUPT)
-			i9xx_hpd_irq_handler(dev);
-
+		if (gt_iir)
+			snb_gt_irq_handler(dev, dev_priv, gt_iir);
 		if (pm_iir)
 			gen6_rps_irq_handler(dev_priv, pm_iir);
-
-		I915_WRITE(GTIIR, gt_iir);
-		I915_WRITE(GEN6_PMIIR, pm_iir);
-		I915_WRITE(VLV_IIR, iir);
+		/* Call regardless, as some status bits might not be
+		 * signalled in iir */
+		valleyview_pipestat_irq_handler(dev, iir);
 	}
 
 out:
@@ -1882,21 +2160,27 @@
 		if (master_ctl == 0 && iir == 0)
 			break;
 
+		ret = IRQ_HANDLED;
+
 		I915_WRITE(GEN8_MASTER_IRQ, 0);
 
+		/* Find, clear, then process each source of interrupt */
+
+		if (iir) {
+			/* Consume port before clearing IIR or we'll miss events */
+			if (iir & I915_DISPLAY_PORT_INTERRUPT)
+				i9xx_hpd_irq_handler(dev);
+			I915_WRITE(VLV_IIR, iir);
+		}
+
 		gen8_gt_irq_handler(dev, dev_priv, master_ctl);
 
+		/* Call regardless, as some status bits might not be
+		 * signalled in iir */
 		valleyview_pipestat_irq_handler(dev, iir);
 
-		/* Consume port.  Then clear IIR or we'll miss events */
-		i9xx_hpd_irq_handler(dev);
-
-		I915_WRITE(VLV_IIR, iir);
-
 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
 		POSTING_READ(GEN8_MASTER_IRQ);
-
-		ret = IRQ_HANDLED;
 	}
 
 	return ret;
@@ -1907,8 +2191,12 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+	u32 dig_hotplug_reg;
 
-	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -2014,8 +2302,12 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+	u32 dig_hotplug_reg;
 
-	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2132,6 +2424,14 @@
 	}
 }
 
+/*
+ * To handle irqs with the minimum potential races with fresh interrupts, we:
+ * 1 - Disable Master Interrupt Control.
+ * 2 - Find the source(s) of the interrupt.
+ * 3 - Clear the Interrupt Identity bits (IIR).
+ * 4 - Process the interrupt(s) that had bits set in the IIRs.
+ * 5 - Re-enable Master Interrupt Control.
+ */
 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -2159,32 +2459,34 @@
 		POSTING_READ(SDEIER);
 	}
 
+	/* Find, clear, then process each source of interrupt */
+
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
+		I915_WRITE(GTIIR, gt_iir);
+		ret = IRQ_HANDLED;
 		if (INTEL_INFO(dev)->gen >= 6)
 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
 		else
 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
-		I915_WRITE(GTIIR, gt_iir);
-		ret = IRQ_HANDLED;
 	}
 
 	de_iir = I915_READ(DEIIR);
 	if (de_iir) {
+		I915_WRITE(DEIIR, de_iir);
+		ret = IRQ_HANDLED;
 		if (INTEL_INFO(dev)->gen >= 7)
 			ivb_display_irq_handler(dev, de_iir);
 		else
 			ilk_display_irq_handler(dev, de_iir);
-		I915_WRITE(DEIIR, de_iir);
-		ret = IRQ_HANDLED;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		u32 pm_iir = I915_READ(GEN6_PMIIR);
 		if (pm_iir) {
-			gen6_rps_irq_handler(dev_priv, pm_iir);
 			I915_WRITE(GEN6_PMIIR, pm_iir);
 			ret = IRQ_HANDLED;
+			gen6_rps_irq_handler(dev_priv, pm_iir);
 		}
 	}
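
The reordering in this handler (and in the gen8/VLV ones) follows the five-step recipe in the new comment: latch IIR, write it back to clear, and only then run the handlers, so an edge arriving mid-handler re-latches instead of being lost. Boiled down (plain C; the "register" is faked for illustration):

#include <stdint.h>

static uint32_t fake_iir = 0x5;		/* pretend two sources are pending */

static uint32_t read_iir(void) { return fake_iir; }
static void write_iir(uint32_t v) { fake_iir &= ~v; }	/* write-1-to-clear */
static void handle(uint32_t bits) { (void)bits; }

static int service_irq(void)
{
	uint32_t iir = read_iir();	/* find the source(s) */

	if (!iir)
		return 0;		/* IRQ_NONE */
	write_iir(iir);			/* clear before processing... */
	handle(iir);			/* ...so new events re-latch meanwhile */
	return 1;			/* IRQ_HANDLED */
}
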
 
@@ -2215,36 +2517,36 @@
 	I915_WRITE(GEN8_MASTER_IRQ, 0);
 	POSTING_READ(GEN8_MASTER_IRQ);
 
+	/* Find, clear, then process each source of interrupt */
+
 	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
 
 	if (master_ctl & GEN8_DE_MISC_IRQ) {
 		tmp = I915_READ(GEN8_DE_MISC_IIR);
-		if (tmp & GEN8_DE_MISC_GSE)
-			intel_opregion_asle_intr(dev);
-		else if (tmp)
-			DRM_ERROR("Unexpected DE Misc interrupt\n");
-		else
-			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
-
 		if (tmp) {
 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
 			ret = IRQ_HANDLED;
+			if (tmp & GEN8_DE_MISC_GSE)
+				intel_opregion_asle_intr(dev);
+			else
+				DRM_ERROR("Unexpected DE Misc interrupt\n");
 		}
+		else
+			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
 	}
 
 	if (master_ctl & GEN8_DE_PORT_IRQ) {
 		tmp = I915_READ(GEN8_DE_PORT_IIR);
-		if (tmp & GEN8_AUX_CHANNEL_A)
-			dp_aux_irq_handler(dev);
-		else if (tmp)
-			DRM_ERROR("Unexpected DE Port interrupt\n");
-		else
-			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
-
 		if (tmp) {
 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
 			ret = IRQ_HANDLED;
+			if (tmp & GEN8_AUX_CHANNEL_A)
+				dp_aux_irq_handler(dev);
+			else
+				DRM_ERROR("Unexpected DE Port interrupt\n");
 		}
+		else
+			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
 	}
 
 	for_each_pipe(pipe) {
@@ -2254,33 +2556,32 @@
 			continue;
 
 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
-		if (pipe_iir & GEN8_PIPE_VBLANK)
-			intel_pipe_handle_vblank(dev, pipe);
-
-		if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
-			intel_prepare_page_flip(dev, pipe);
-			intel_finish_page_flip_plane(dev, pipe);
-		}
-
-		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
-			hsw_pipe_crc_irq_handler(dev, pipe);
-
-		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
-			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
-								  false))
-				DRM_ERROR("Pipe %c FIFO underrun\n",
-					  pipe_name(pipe));
-		}
-
-		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
-			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
-				  pipe_name(pipe),
-				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-		}
-
 		if (pipe_iir) {
 			ret = IRQ_HANDLED;
 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+			if (pipe_iir & GEN8_PIPE_VBLANK)
+				intel_pipe_handle_vblank(dev, pipe);
+
+			if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+				intel_prepare_page_flip(dev, pipe);
+				intel_finish_page_flip_plane(dev, pipe);
+			}
+
+			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+				hsw_pipe_crc_irq_handler(dev, pipe);
+
+			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+				if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+									  false))
+					DRM_ERROR("Pipe %c FIFO underrun\n",
+						  pipe_name(pipe));
+			}
+
+			if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+					  pipe_name(pipe),
+					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+			}
 		} else
 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
 	}
@@ -2292,13 +2593,13 @@
 		 * on older pch-split platforms. But this needs testing.
 		 */
 		u32 pch_iir = I915_READ(SDEIIR);
-
-		cpt_irq_handler(dev, pch_iir);
-
 		if (pch_iir) {
 			I915_WRITE(SDEIIR, pch_iir);
 			ret = IRQ_HANDLED;
-		}
+			cpt_irq_handler(dev, pch_iir);
+		} else
+			DRM_ERROR("The master control interrupt lied (SDE)!\n");
+
 	}
 
 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2753,12 +3054,7 @@
 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
 {
 	if (INTEL_INFO(dev)->gen >= 8) {
-		/*
-		 * FIXME: gen8 semaphore support - currently we don't emit
-		 * semaphores on bdw anyway, but this needs to be addressed when
-		 * we merge that code.
-		 */
-		return false;
+		return (ipehr >> 23) == 0x1c;
 	} else {
 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
@@ -2767,19 +3063,20 @@
 }
 
 static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	int i;
 
 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
-		/*
-		 * FIXME: gen8 semaphore support - currently we don't emit
-		 * semaphores on bdw anyway, but this needs to be addressed when
-		 * we merge that code.
-		 */
-		return NULL;
+		for_each_ring(signaller, dev_priv, i) {
+			if (ring == signaller)
+				continue;
+
+			if (offset == signaller->semaphore.signal_ggtt[ring->id])
+				return signaller;
+		}
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
@@ -2792,8 +3089,8 @@
 		}
 	}
 
-	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
-		  ring->id, ipehr);
+	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+		  ring->id, ipehr, offset);
 
 	return NULL;
 }
@@ -2803,7 +3100,8 @@
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	u32 cmd, ipehr, head;
-	int i;
+	u64 offset = 0;
+	int i, backwards;
 
 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
@@ -2812,13 +3110,15 @@
 	/*
 	 * HEAD is likely pointing to the dword after the actual command,
 	 * so scan backwards until we find the MBOX. But limit it to just 3
-	 * dwords. Note that we don't care about ACTHD here since that might
+	 * or 4 dwords depending on the semaphore wait command size.
+	 * Note that we don't care about ACTHD here since that might
 	 * point at a batch, and semaphores are always emitted into the
 	 * ringbuffer itself.
 	 */
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
 
-	for (i = 4; i; --i) {
+	for (i = backwards; i; --i) {
 		/*
 		 * Be paranoid and presume the hw has gone off into the wild -
 		 * our ring is smaller than what the hardware (and hence
@@ -2838,7 +3138,12 @@
 		return NULL;
 
 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
-	return semaphore_wait_to_signaller_ring(ring, ipehr);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		offset = ioread32(ring->buffer->virtual_start + head + 12);
+		offset <<= 32;
+		offset |= ioread32(ring->buffer->virtual_start + head + 8);
+	}
+	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
 }
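
For gen8 the wait target address sits in the two dwords after the MI_SEMAPHORE_WAIT header, high word at +12 and low word at +8, and must be OR-ed together (overwriting the low half would drop the high bits). In isolation (plain C sketch, invented names):

#include <stdint.h>

static uint64_t sk_read_semaphore_offset(const uint32_t *ring_vaddr,
					 uint32_t head)
{
	uint64_t offset;

	offset = ring_vaddr[(head + 12) / 4];	/* high dword */
	offset <<= 32;
	offset |= ring_vaddr[(head + 8) / 4];	/* low dword */
	return offset;
}
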
 
 static int semaphore_passed(struct intel_engine_cs *ring)
@@ -3159,7 +3464,9 @@
 	gen8_gt_irq_reset(dev_priv);
 
 	for_each_pipe(pipe)
-		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+		if (intel_display_power_enabled(dev_priv,
+						POWER_DOMAIN_PIPE(pipe)))
+			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
 
 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3168,6 +3475,18 @@
 	ibx_irq_reset(dev);
 }
 
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
+			  ~dev_priv->de_irq_mask[PIPE_B]);
+	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
+			  ~dev_priv->de_irq_mask[PIPE_C]);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3492,8 +3811,11 @@
 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
 
 	for_each_pipe(pipe)
-		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
-				  de_pipe_enables);
+		if (intel_display_power_enabled(dev_priv,
+				POWER_DOMAIN_PIPE(pipe)))
+			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+					  dev_priv->de_irq_mask[pipe],
+					  de_pipe_enables);
 
 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
 }
@@ -4324,12 +4646,17 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
 	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
 	/* Let's track the enabled rps events */
-	dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+	if (IS_VALLEYVIEW(dev))
+		/* WaGsvRC0ResidenncyMethod:VLV */
+		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+	else
+		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
 		    i915_hangcheck_elapsed,
@@ -4339,6 +4666,9 @@
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
+	/* Haven't installed the IRQ handler yet */
+	dev_priv->pm._irqs_disabled = true;
+
 	if (IS_GEN2(dev)) {
 		dev->max_vblank_count = 0;
 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4426,7 +4756,9 @@
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		struct intel_connector *intel_connector = to_intel_connector(connector);
 		connector->polled = intel_connector->polled;
-		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+		if (intel_connector->mst_port)
 			connector->polled = DRM_CONNECTOR_POLL_HPD;
 	}
 
@@ -4444,7 +4776,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	dev->driver->irq_uninstall(dev);
-	dev_priv->pm.irqs_disabled = true;
+	dev_priv->pm._irqs_disabled = true;
 }
 
 /* Restore interrupts so we can recover from runtime PM. */
@@ -4452,7 +4784,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	dev_priv->pm.irqs_disabled = false;
+	dev_priv->pm._irqs_disabled = false;
 	dev->driver->irq_preinstall(dev);
 	dev->driver->irq_postinstall(dev);
 }
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d05a2af..62ee830 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -37,7 +37,7 @@
 	.enable_fbc = -1,
 	.enable_hangcheck = true,
 	.enable_ppgtt = -1,
-	.enable_psr = 0,
+	.enable_psr = 1,
 	.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
 	.disable_power_well = 1,
 	.enable_ips = 1,
@@ -48,6 +48,8 @@
 	.disable_display = 0,
 	.enable_cmd_parser = 1,
 	.disable_vtd_wa = 0,
+	.use_mmio_flip = 0,
+	.mmio_debug = 0,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -117,7 +119,7 @@
 	"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
 
 module_param_named(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)");
 
 module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
@@ -156,3 +158,12 @@
 module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
 MODULE_PARM_DESC(enable_cmd_parser,
 		 "Enable command parsing (1=enabled [default], 0=disabled)");
+
+module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+MODULE_PARM_DESC(use_mmio_flip,
+		 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
+
+module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+MODULE_PARM_DESC(mmio_debug,
+	"Enable the MMIO debug code (default: false). This may negatively "
+	"affect performance.");
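
Both new parameters are registered with mode 0600, so like the other
i915 options they stay adjustable at runtime through
/sys/module/i915/parameters/ (for example by writing 1 to
.../use_mmio_flip); whether a change takes effect immediately depends
on when the driver samples the value, which these hunks do not show.
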
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a5bab61b..fe5c276 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,8 +29,8 @@
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
-#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
-#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
+#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
+			       (pipe) == PIPE_B ? (b) : (c))
 
 #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
 #define _MASKED_BIT_DISABLE(a) ((a) << 16)
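
The reworked _PIPE3() selects between three explicit addresses instead
of assuming a constant stride, which matters on CHV where the third
instance of a register is not at a linear offset from the first two. A
minimal sketch of the difference (the _EXAMPLE_* values below are
hypothetical; the DPLL offsets they mirror appear later in this patch):

	#define _EXAMPLE_A 0x6014
	#define _EXAMPLE_B 0x6018
	#define _EXAMPLE_C 0x6030	/* not at _EXAMPLE_A + 2 * 4 */

	#define EXAMPLE(pipe) _PIPE3((pipe), _EXAMPLE_A, _EXAMPLE_B, _EXAMPLE_C)

	/* EXAMPLE(PIPE_C) == 0x6030, whereas the stride-based
	 * _PIPE(PIPE_C, _EXAMPLE_A, _EXAMPLE_B) would yield 0x601c. */
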
@@ -240,7 +240,7 @@
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
 #define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6, gen7 */
 #define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
 #define   MI_SEMAPHORE_UPDATE	    (1<<21)
 #define   MI_SEMAPHORE_COMPARE	    (1<<20)
@@ -266,6 +266,11 @@
 #define   MI_RESTORE_EXT_STATE_EN	(1<<2)
 #define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
+#define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
+#define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
+#define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
+#define   MI_SEMAPHORE_POLL		(1<<15)
+#define   MI_SEMAPHORE_SAD_GTE_SDD	(1<<12)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
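
MI_SEMAPHORE_SIGNAL and MI_SEMAPHORE_WAIT replace the gen6/gen7 mailbox
scheme with address-based semaphores on gen8+. A hedged sketch of what
a wait emission might look like (the helper, its caller and the operand
layout are assumptions based on how earlier MI commands are emitted,
not something this hunk defines):

	/* Sketch: stall until the dword at 'addr' reaches 'seqno'.
	 * SAD_GTE_SDD = semaphore address data >= semaphore data dword.
	 * Ring space reservation (intel_ring_begin) is omitted. */
	static void gen8_emit_wait(struct intel_engine_cs *ring,
				   u32 seqno, u64 addr)
	{
		intel_ring_emit(ring, MI_SEMAPHORE_WAIT |
				      MI_SEMAPHORE_GLOBAL_GTT |
				      MI_SEMAPHORE_POLL |
				      MI_SEMAPHORE_SAD_GTE_SDD);
		intel_ring_emit(ring, seqno);
		intel_ring_emit(ring, lower_32_bits(addr));
		intel_ring_emit(ring, upper_32_bits(addr));
	}
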
@@ -360,6 +365,7 @@
 #define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE		(1<<10) /* GM45+ only */
 #define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
 #define   PIPE_CONTROL_NOTIFY				(1<<8)
+#define   PIPE_CONTROL_FLUSH_ENABLE			(1<<7) /* gen7+ */
 #define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
 #define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
 #define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
@@ -525,10 +531,21 @@
 #define PUNIT_REG_GPU_FREQ_STS			0xd8
 #define   GENFREQSTATUS				(1<<0)
 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ		0xdc
+#define PUNIT_REG_CZ_TIMESTAMP			0xce
 
 #define PUNIT_FUSE_BUS2				0xf6 /* bits 47:40 */
 #define PUNIT_FUSE_BUS1				0xf5 /* bits 55:48 */
 
+#define PUNIT_GPU_STATUS_REG			0xdb
+#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT	16
+#define PUNIT_GPU_STATUS_MAX_FREQ_MASK		0xff
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_SHIFT	8
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK	0xff
+
+#define PUNIT_GPU_DUTYCYCLE_REG		0xdf
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT	8
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK	0xff
+
 #define IOSF_NC_FB_GFX_FREQ_FUSE		0x1c
 #define   FB_GFX_MAX_FREQ_FUSE_SHIFT		3
 #define   FB_GFX_MAX_FREQ_FUSE_MASK		0x000007f8
@@ -540,6 +557,11 @@
 #define   FB_FMAX_VMIN_FREQ_LO_SHIFT		27
 #define   FB_FMAX_VMIN_FREQ_LO_MASK		0xf8000000
 
+#define VLV_CZ_CLOCK_TO_MILLI_SEC		100000
+#define VLV_RP_UP_EI_THRESHOLD			90
+#define VLV_RP_DOWN_EI_THRESHOLD		70
+#define VLV_INT_COUNT_FOR_DOWN_EI		5
+
 /* vlv2 north clock has */
 #define CCK_FUSE_REG				0x8
 #define  CCK_FUSE_HPLL_FREQ_MASK		0x3
@@ -574,6 +596,11 @@
 #define  DSI_PLL_M1_DIV_SHIFT			0
 #define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
 #define CCK_DISPLAY_CLOCK_CONTROL		0x6b
+#define  DISPLAY_TRUNK_FORCE_ON			(1 << 17)
+#define  DISPLAY_TRUNK_FORCE_OFF		(1 << 16)
+#define  DISPLAY_FREQUENCY_STATUS		(0x1f << 8)
+#define  DISPLAY_FREQUENCY_STATUS_SHIFT		8
+#define  DISPLAY_FREQUENCY_VALUES		(0x1f << 0)
 
 /**
  * DOC: DPIO
@@ -761,6 +788,8 @@
 
 #define _VLV_PCS_DW8_CH0		0x8220
 #define _VLV_PCS_DW8_CH1		0x8420
+#define   CHV_PCS_USEDCLKCHANNEL_OVRRIDE	(1 << 20)
+#define   CHV_PCS_USEDCLKCHANNEL		(1 << 21)
 #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
 
 #define _VLV_PCS01_DW8_CH0		0x0220
@@ -869,6 +898,16 @@
 #define   DPIO_CHV_PROP_COEFF_SHIFT	0
 #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
 
+#define _CHV_CMN_DW5_CH0               0x8114
+#define   CHV_BUFRIGHTENA1_DISABLE	(0 << 20)
+#define   CHV_BUFRIGHTENA1_NORMAL	(1 << 20)
+#define   CHV_BUFRIGHTENA1_FORCE	(3 << 20)
+#define   CHV_BUFRIGHTENA1_MASK		(3 << 20)
+#define   CHV_BUFLEFTENA1_DISABLE	(0 << 22)
+#define   CHV_BUFLEFTENA1_NORMAL	(1 << 22)
+#define   CHV_BUFLEFTENA1_FORCE		(3 << 22)
+#define   CHV_BUFLEFTENA1_MASK		(3 << 22)
+
 #define _CHV_CMN_DW13_CH0		0x8134
 #define _CHV_CMN_DW0_CH1		0x8080
 #define   DPIO_CHV_S1_DIV_SHIFT		21
@@ -883,8 +922,21 @@
 #define _CHV_CMN_DW1_CH1		0x8084
 #define   DPIO_AFC_RECAL		(1 << 14)
 #define   DPIO_DCLKP_EN			(1 << 13)
+#define   CHV_BUFLEFTENA2_DISABLE	(0 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_NORMAL	(1 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_FORCE		(3 << 17) /* CL2 DW1 only */
+#define   CHV_BUFLEFTENA2_MASK		(3 << 17) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_DISABLE	(0 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_NORMAL	(1 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_FORCE	(3 << 19) /* CL2 DW1 only */
+#define   CHV_BUFRIGHTENA2_MASK		(3 << 19) /* CL2 DW1 only */
 #define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
 
+#define _CHV_CMN_DW19_CH0		0x814c
+#define _CHV_CMN_DW6_CH1		0x8098
+#define   CHV_CMN_USEDCLKCHANNEL	(1 << 13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+
 #define CHV_CMN_DW30			0x8178
 #define   DPIO_LRC_BYPASS		(1 << 3)
 
@@ -933,6 +985,7 @@
 #define   SANDYBRIDGE_FENCE_PITCH_SHIFT	32
 #define   GEN7_FENCE_MAX_PITCH_VAL	0x0800
 
+
 /* control register for cpu gtt access */
 #define TILECTL				0x101000
 #define   TILECTL_SWZCTL			(1 << 0)
@@ -1170,6 +1223,8 @@
 #define VLV_IMR		(VLV_DISPLAY_BASE + 0x20a8)
 #define VLV_ISR		(VLV_DISPLAY_BASE + 0x20ac)
 #define VLV_PCBR	(VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT	12
+
 #define   DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
 #define EIR		0x020b0
 #define EMR		0x020b4
@@ -1570,11 +1625,10 @@
 /*
  * Clock control & power management
  */
-#define DPLL_A_OFFSET 0x6014
-#define DPLL_B_OFFSET 0x6018
-#define CHV_DPLL_C_OFFSET 0x6030
-#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
-		    dev_priv->info.display_mmio_offset)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
 #define VGA0	0x6000
 #define VGA1	0x6004
@@ -1662,11 +1716,10 @@
 #define   SDVO_MULTIPLIER_SHIFT_HIRES		4
 #define   SDVO_MULTIPLIER_SHIFT_VGA		0
 
-#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
-#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
-#define CHV_DPLL_C_MD_OFFSET 0x603c
-#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
-		       dev_priv->info.display_mmio_offset)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
 /*
  * UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2231,7 +2284,7 @@
 /* Same as Haswell, but 72064 bytes now. */
 #define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)
 
-
+#define CHV_CLK_CTL1			0x101100
 #define VLV_CLK_CTL2			0x101104
 #define   CLK_CTL2_CZCOUNT_30NS_SHIFT	28
 
@@ -2376,6 +2429,7 @@
 #define EDP_PSR_BASE(dev)                       (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
 #define EDP_PSR_CTL(dev)			(EDP_PSR_BASE(dev) + 0)
 #define   EDP_PSR_ENABLE			(1<<31)
+#define   BDW_PSR_SINGLE_FRAME			(1<<30)
 #define   EDP_PSR_LINK_DISABLE			(0<<27)
 #define   EDP_PSR_LINK_STANDBY			(1<<27)
 #define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK	(3<<25)
@@ -2533,8 +2587,14 @@
 #define   PORTC_HOTPLUG_LIVE_STATUS_VLV		(1 << 28)
 #define   PORTB_HOTPLUG_LIVE_STATUS_VLV		(1 << 29)
 #define   PORTD_HOTPLUG_INT_STATUS		(3 << 21)
+#define   PORTD_HOTPLUG_INT_LONG_PULSE		(2 << 21)
+#define   PORTD_HOTPLUG_INT_SHORT_PULSE		(1 << 21)
 #define   PORTC_HOTPLUG_INT_STATUS		(3 << 19)
+#define   PORTC_HOTPLUG_INT_LONG_PULSE		(2 << 19)
+#define   PORTC_HOTPLUG_INT_SHORT_PULSE		(1 << 19)
 #define   PORTB_HOTPLUG_INT_STATUS		(3 << 17)
+#define   PORTB_HOTPLUG_INT_LONG_PULSE		(2 << 17)
+#define   PORTB_HOTPLUG_INT_SHORT_PULSE		(1 << 17)
 /* CRT/TV common between gen3+ */
 #define   CRT_HOTPLUG_INT_STATUS		(1 << 11)
 #define   TV_HOTPLUG_INT_STATUS			(1 << 10)
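
The new *_LONG_PULSE/*_SHORT_PULSE values decode the existing two-bit
per-port status fields: comparing (status & PORTB_HOTPLUG_INT_STATUS)
against PORTB_HOTPLUG_INT_LONG_PULSE separates a long pulse (a real
connect/disconnect) from a short one, which DisplayPort sinks use to
raise an IRQ, e.g. for MST sideband replies.
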
@@ -2588,7 +2648,7 @@
 
 #define PORT_DFT_I9XX				0x61150
 #define   DC_BALANCE_RESET			(1 << 25)
-#define PORT_DFT2_G4X				0x61154
+#define PORT_DFT2_G4X		(dev_priv->info.display_mmio_offset + 0x61154)
 #define   DC_BALANCE_RESET_VLV			(1 << 31)
 #define   PIPE_SCRAMBLE_RESET_MASK		(0x3 << 0)
 #define   PIPE_B_SCRAMBLE_RESET			(1 << 1)
@@ -4630,6 +4690,8 @@
 #define GEN7_L3CNTLREG1				0xB01C
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL			0x3C47FF8C
 #define  GEN7_L3AGDIS				(1<<19)
+#define GEN7_L3CNTLREG2				0xB020
+#define GEN7_L3CNTLREG3				0xB024
 
 #define GEN7_L3_CHICKEN_MODE_REGISTER		0xB030
 #define  GEN7_WA_L3_CHICKEN_MODE				0x20000000
@@ -4876,8 +4938,7 @@
 #define _PCH_TRANSA_LINK_M2	0xe0048
 #define _PCH_TRANSA_LINK_N2	0xe004c
 
-/* Per-transcoder DIP controls */
-
+/* Per-transcoder DIP controls (PCH) */
 #define _VIDEO_DIP_CTL_A         0xe0200
 #define _VIDEO_DIP_DATA_A        0xe0208
 #define _VIDEO_DIP_GCP_A         0xe0210
@@ -4890,6 +4951,7 @@
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
+/* Per-transcoder DIP controls (VLV) */
 #define VLV_VIDEO_DIP_CTL_A		(VLV_DISPLAY_BASE + 0x60200)
 #define VLV_VIDEO_DIP_DATA_A		(VLV_DISPLAY_BASE + 0x60208)
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A	(VLV_DISPLAY_BASE + 0x60210)
@@ -4898,12 +4960,19 @@
 #define VLV_VIDEO_DIP_DATA_B		(VLV_DISPLAY_BASE + 0x61174)
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B	(VLV_DISPLAY_BASE + 0x61178)
 
+#define CHV_VIDEO_DIP_CTL_C		(VLV_DISPLAY_BASE + 0x611f0)
+#define CHV_VIDEO_DIP_DATA_C		(VLV_DISPLAY_BASE + 0x611f4)
+#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C	(VLV_DISPLAY_BASE + 0x611f8)
+
 #define VLV_TVIDEO_DIP_CTL(pipe) \
-	 _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+	_PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
+	       VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
 #define VLV_TVIDEO_DIP_DATA(pipe) \
-	 _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+	_PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
+	       VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
 #define VLV_TVIDEO_DIP_GCP(pipe) \
-	_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+	_PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+		VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
 
 /* Haswell DIP controls */
 #define HSW_VIDEO_DIP_CTL_A		0x60200
@@ -5334,6 +5403,7 @@
 #define   VLV_GTLC_ALLOWWAKEERR			(1 << 1)
 #define   VLV_GTLC_PW_MEDIA_STATUS_MASK		(1 << 5)
 #define   VLV_GTLC_PW_RENDER_STATUS_MASK	(1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG              0x130098
 #define  FORCEWAKE_MT				0xa188 /* multi-threaded */
 #define   FORCEWAKE_KERNEL			0x1
 #define   FORCEWAKE_USER			0x2
@@ -5471,6 +5541,12 @@
 						 GEN6_PM_RP_DOWN_THRESHOLD | \
 						 GEN6_PM_RP_DOWN_TIMEOUT)
 
+#define CHV_CZ_CLOCK_FREQ_MODE_200			200
+#define CHV_CZ_CLOCK_FREQ_MODE_267			267
+#define CHV_CZ_CLOCK_FREQ_MODE_320			320
+#define CHV_CZ_CLOCK_FREQ_MODE_333			333
+#define CHV_CZ_CLOCK_FREQ_MODE_400			400
+
 #define GEN7_GT_SCRATCH_BASE			0x4F100
 #define GEN7_GT_SCRATCH_REG_NUM			8
 
@@ -5481,6 +5557,8 @@
 #define GEN6_GT_GFX_RC6_LOCKED			0x138104
 #define VLV_COUNTER_CONTROL			0x138104
 #define   VLV_COUNT_RANGE_HIGH			(1<<15)
+#define   VLV_MEDIA_RC0_COUNT_EN		(1<<5)
+#define   VLV_RENDER_RC0_COUNT_EN		(1<<4)
 #define   VLV_MEDIA_RC6_COUNT_EN		(1<<1)
 #define   VLV_RENDER_RC6_COUNT_EN		(1<<0)
 #define GEN6_GT_GFX_RC6				0x138108
@@ -5489,6 +5567,8 @@
 
 #define GEN6_GT_GFX_RC6p			0x13810C
 #define GEN6_GT_GFX_RC6pp			0x138110
+#define VLV_RENDER_C0_COUNT_REG		0x138118
+#define VLV_MEDIA_C0_COUNT_REG			0x13811C
 
 #define GEN6_PCODE_MAILBOX			0x138124
 #define   GEN6_PCODE_READY			(1<<31)
@@ -5723,6 +5803,7 @@
 #define  TRANS_DDI_FUNC_ENABLE		(1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
 #define  TRANS_DDI_PORT_MASK		(7<<28)
+#define  TRANS_DDI_PORT_SHIFT		28
 #define  TRANS_DDI_SELECT_PORT(x)	((x)<<28)
 #define  TRANS_DDI_PORT_NONE		(0<<28)
 #define  TRANS_DDI_MODE_SELECT_MASK	(7<<24)
@@ -5743,6 +5824,7 @@
 #define  TRANS_DDI_EDP_INPUT_A_ONOFF	(4<<12)
 #define  TRANS_DDI_EDP_INPUT_B_ONOFF	(5<<12)
 #define  TRANS_DDI_EDP_INPUT_C_ONOFF	(6<<12)
+#define  TRANS_DDI_DP_VC_PAYLOAD_ALLOC	(1<<8)
 #define  TRANS_DDI_BFI_ENABLE		(1<<4)
 
 /* DisplayPort Transport Control */
@@ -5752,6 +5834,7 @@
 #define  DP_TP_CTL_ENABLE			(1<<31)
 #define  DP_TP_CTL_MODE_SST			(0<<27)
 #define  DP_TP_CTL_MODE_MST			(1<<27)
+#define  DP_TP_CTL_FORCE_ACT			(1<<25)
 #define  DP_TP_CTL_ENHANCED_FRAME_ENABLE	(1<<18)
 #define  DP_TP_CTL_FDI_AUTOTRAIN		(1<<15)
 #define  DP_TP_CTL_LINK_TRAIN_MASK		(7<<8)
@@ -5766,15 +5849,19 @@
 #define DP_TP_STATUS_A			0x64044
 #define DP_TP_STATUS_B			0x64144
 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
-#define  DP_TP_STATUS_IDLE_DONE		(1<<25)
-#define  DP_TP_STATUS_AUTOTRAIN_DONE	(1<<12)
+#define  DP_TP_STATUS_IDLE_DONE			(1<<25)
+#define  DP_TP_STATUS_ACT_SENT			(1<<24)
+#define  DP_TP_STATUS_MODE_STATUS_MST		(1<<23)
+#define  DP_TP_STATUS_AUTOTRAIN_DONE		(1<<12)
+#define  DP_TP_STATUS_PAYLOAD_MAPPING_VC2	(3 << 8)
+#define  DP_TP_STATUS_PAYLOAD_MAPPING_VC1	(3 << 4)
+#define  DP_TP_STATUS_PAYLOAD_MAPPING_VC0	(3 << 0)
 
 /* DDI Buffer Control */
 #define DDI_BUF_CTL_A				0x64000
 #define DDI_BUF_CTL_B				0x64100
 #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
 #define  DDI_BUF_CTL_ENABLE			(1<<31)
-/* Haswell */
 #define  DDI_BUF_EMP_400MV_0DB_HSW		(0<<24)   /* Sel0 */
 #define  DDI_BUF_EMP_400MV_3_5DB_HSW		(1<<24)   /* Sel1 */
 #define  DDI_BUF_EMP_400MV_6DB_HSW		(2<<24)   /* Sel2 */
@@ -5784,16 +5871,6 @@
 #define  DDI_BUF_EMP_600MV_6DB_HSW		(6<<24)   /* Sel6 */
 #define  DDI_BUF_EMP_800MV_0DB_HSW		(7<<24)   /* Sel7 */
 #define  DDI_BUF_EMP_800MV_3_5DB_HSW		(8<<24)   /* Sel8 */
-/* Broadwell */
-#define  DDI_BUF_EMP_400MV_0DB_BDW		(0<<24)   /* Sel0 */
-#define  DDI_BUF_EMP_400MV_3_5DB_BDW		(1<<24)   /* Sel1 */
-#define  DDI_BUF_EMP_400MV_6DB_BDW		(2<<24)   /* Sel2 */
-#define  DDI_BUF_EMP_600MV_0DB_BDW		(3<<24)   /* Sel3 */
-#define  DDI_BUF_EMP_600MV_3_5DB_BDW		(4<<24)   /* Sel4 */
-#define  DDI_BUF_EMP_600MV_6DB_BDW		(5<<24)   /* Sel5 */
-#define  DDI_BUF_EMP_800MV_0DB_BDW		(6<<24)   /* Sel6 */
-#define  DDI_BUF_EMP_800MV_3_5DB_BDW		(7<<24)   /* Sel7 */
-#define  DDI_BUF_EMP_1200MV_0DB_BDW		(8<<24)   /* Sel8 */
 #define  DDI_BUF_EMP_MASK			(0xf<<24)
 #define  DDI_BUF_PORT_REVERSAL			(1<<16)
 #define  DDI_BUF_IS_IDLE			(1<<7)
@@ -5861,10 +5938,12 @@
 /* WRPLL */
 #define WRPLL_CTL1			0x46040
 #define WRPLL_CTL2			0x46060
+#define WRPLL_CTL(pll)			((pll) == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
 #define  WRPLL_PLL_ENABLE		(1<<31)
-#define  WRPLL_PLL_SELECT_SSC		(0x01<<28)
-#define  WRPLL_PLL_SELECT_NON_SSC	(0x02<<28)
-#define  WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
+#define  WRPLL_PLL_SSC			(1<<28)
+#define  WRPLL_PLL_NON_SSC		(2<<28)
+#define  WRPLL_PLL_LCPLL		(3<<28)
+#define  WRPLL_PLL_REF_MASK		(3<<28)
 /* WRPLL divider programming */
 #define  WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
 #define  WRPLL_DIVIDER_REF_MASK		(0xff)
@@ -5883,6 +5962,7 @@
 #define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
 #define  PORT_CLK_SEL_LCPLL_810		(2<<29)
 #define  PORT_CLK_SEL_SPLL		(3<<29)
+#define  PORT_CLK_SEL_WRPLL(pll)	(((pll)+4)<<29)
 #define  PORT_CLK_SEL_WRPLL1		(4<<29)
 #define  PORT_CLK_SEL_WRPLL2		(5<<29)
 #define  PORT_CLK_SEL_NONE		(7<<29)
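
WRPLL_CTL(pll) and PORT_CLK_SEL_WRPLL(pll) parameterize the two WRPLL
instances by a zero-based id, so callers can carry a pll index instead
of open-coding both variants. The new forms line up exactly with the
old names, which a compile-time check could assert (illustrative only,
not part of the patch):

	BUILD_BUG_ON(PORT_CLK_SEL_WRPLL(0) != PORT_CLK_SEL_WRPLL1);
	BUILD_BUG_ON(PORT_CLK_SEL_WRPLL(1) != PORT_CLK_SEL_WRPLL2);
	BUILD_BUG_ON(WRPLL_CTL(0) != WRPLL_CTL1);
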
@@ -5924,7 +6004,10 @@
 #define  LCPLL_CD_SOURCE_FCLK		(1<<21)
 #define  LCPLL_CD_SOURCE_FCLK_DONE	(1<<19)
 
-#define D_COMP				(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using I915_WRITE. */
+#define D_COMP_HSW			(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW			0x138144
 #define  D_COMP_RCOMP_IN_PROGRESS	(1<<9)
 #define  D_COMP_COMP_FORCE		(1<<8)
 #define  D_COMP_COMP_DISABLE		(1<<0)
@@ -6005,7 +6088,8 @@
 
 #define _MIPIA_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61190)
 #define _MIPIB_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(pipe)		_PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define MIPI_PORT_CTRL(tc)		_TRANSCODER(tc, _MIPIA_PORT_CTRL, \
+						_MIPIB_PORT_CTRL)
 #define  DPI_ENABLE					(1 << 31) /* A + B */
 #define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT		27
 #define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 27)
@@ -6047,18 +6131,20 @@
 
 #define _MIPIA_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61194)
 #define _MIPIB_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(pipe)		_PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define MIPI_TEARING_CTRL(tc)			_TRANSCODER(tc, \
+				_MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
 #define  TEARING_EFFECT_DELAY_SHIFT			0
 #define  TEARING_EFFECT_DELAY_MASK			(0xffff << 0)
 
 /* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG				(VLV_DISPLAY_BASE + 0x611a0)
+#define _MIPIA_AUTOPWG			(VLV_DISPLAY_BASE + 0x611a0)
 
 /* MIPI DSI Controller and D-PHY registers */
 
-#define _MIPIA_DEVICE_READY			(VLV_DISPLAY_BASE + 0xb000)
-#define _MIPIB_DEVICE_READY			(VLV_DISPLAY_BASE + 0xb800)
-#define MIPI_DEVICE_READY(pipe)		_PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define _MIPIA_DEVICE_READY		(dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIB_DEVICE_READY		(dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(tc)		_TRANSCODER(tc, _MIPIA_DEVICE_READY, \
+						_MIPIB_DEVICE_READY)
 #define  BUS_POSSESSION					(1 << 3) /* set to give bus to receiver */
 #define  ULPS_STATE_MASK				(3 << 1)
 #define  ULPS_STATE_ENTER				(2 << 1)
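
Two things change throughout the MIPI block: the registers are rebased
on dev_priv->mipi_mmio_base instead of the hard-coded VLV_DISPLAY_BASE,
and they are indexed by DSI transcoder rather than by pipe.
_TRANSCODER() keeps the usual linear-offset trick, so
MIPI_DEVICE_READY(0) resolves to mipi_mmio_base + 0xb000 and
MIPI_DEVICE_READY(1) to mipi_mmio_base + 0xb800.
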
@@ -6066,12 +6152,14 @@
 #define  ULPS_STATE_NORMAL_OPERATION			(0 << 1)
 #define  DEVICE_READY					(1 << 0)
 
-#define _MIPIA_INTR_STAT			(VLV_DISPLAY_BASE + 0xb004)
-#define _MIPIB_INTR_STAT			(VLV_DISPLAY_BASE + 0xb804)
-#define MIPI_INTR_STAT(pipe)		_PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
-#define _MIPIA_INTR_EN				(VLV_DISPLAY_BASE + 0xb008)
-#define _MIPIB_INTR_EN				(VLV_DISPLAY_BASE + 0xb808)
-#define MIPI_INTR_EN(pipe)		_PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define _MIPIA_INTR_STAT		(dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIB_INTR_STAT		(dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(tc)		_TRANSCODER(tc, _MIPIA_INTR_STAT, \
+					_MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN			(dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIB_INTR_EN			(dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(tc)		_TRANSCODER(tc, _MIPIA_INTR_EN, \
+					_MIPIB_INTR_EN)
 #define  TEARING_EFFECT					(1 << 31)
 #define  SPL_PKT_SENT_INTERRUPT				(1 << 30)
 #define  GEN_READ_DATA_AVAIL				(1 << 29)
@@ -6105,9 +6193,10 @@
 #define  RXSOT_SYNC_ERROR				(1 << 1)
 #define  RXSOT_ERROR					(1 << 0)
 
-#define _MIPIA_DSI_FUNC_PRG			(VLV_DISPLAY_BASE + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG			(VLV_DISPLAY_BASE + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(pipe)		_PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG		(dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG		(dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(tc)		_TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
+						_MIPIB_DSI_FUNC_PRG)
 #define  CMD_MODE_DATA_WIDTH_MASK			(7 << 13)
 #define  CMD_MODE_NOT_SUPPORTED				(0 << 13)
 #define  CMD_MODE_DATA_WIDTH_16_BIT			(1 << 13)
@@ -6128,78 +6217,94 @@
 #define  DATA_LANES_PRG_REG_SHIFT			0
 #define  DATA_LANES_PRG_REG_MASK			(7 << 0)
 
-#define _MIPIA_HS_TX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(tc)	_TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
+					_MIPIB_HS_TX_TIMEOUT)
 #define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK		0xffffff
 
-#define _MIPIA_LP_RX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT		(dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(tc)	_TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
+					_MIPIB_LP_RX_TIMEOUT)
 #define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK		0xffffff
 
-#define _MIPIA_TURN_AROUND_TIMEOUT		(VLV_DISPLAY_BASE + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT		(VLV_DISPLAY_BASE + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT	(dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT	(dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(tc)	_TRANSCODER(tc, \
+			_MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
 #define  TURN_AROUND_TIMEOUT_MASK			0x3f
 
-#define _MIPIA_DEVICE_RESET_TIMER		(VLV_DISPLAY_BASE + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER		(VLV_DISPLAY_BASE + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(pipe)	_PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER	(dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER	(dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(tc)	_TRANSCODER(tc, \
+			_MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
 #define  DEVICE_RESET_TIMER_MASK			0xffff
 
-#define _MIPIA_DPI_RESOLUTION			(VLV_DISPLAY_BASE + 0xb020)
-#define _MIPIB_DPI_RESOLUTION			(VLV_DISPLAY_BASE + 0xb820)
-#define MIPI_DPI_RESOLUTION(pipe)	_PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION		(dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIB_DPI_RESOLUTION		(dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(tc)	_TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
+					_MIPIB_DPI_RESOLUTION)
 #define  VERTICAL_ADDRESS_SHIFT				16
 #define  VERTICAL_ADDRESS_MASK				(0xffff << 16)
 #define  HORIZONTAL_ADDRESS_SHIFT			0
 #define  HORIZONTAL_ADDRESS_MASK			0xffff
 
-#define _MIPIA_DBI_FIFO_THROTTLE		(VLV_DISPLAY_BASE + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE		(VLV_DISPLAY_BASE + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(pipe)	_PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE	(dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE	(dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(tc)	_TRANSCODER(tc, \
+			_MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
 #define  DBI_FIFO_EMPTY_HALF				(0 << 0)
 #define  DBI_FIFO_EMPTY_QUARTER				(1 << 0)
 #define  DBI_FIFO_EMPTY_7_LOCATIONS			(2 << 0)
 
 /* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(tc)	_TRANSCODER(tc, \
+			_MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
 
-#define _MIPIA_HBP_COUNT			(VLV_DISPLAY_BASE + 0xb02c)
-#define _MIPIB_HBP_COUNT			(VLV_DISPLAY_BASE + 0xb82c)
-#define MIPI_HBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
+#define _MIPIA_HBP_COUNT		(dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIB_HBP_COUNT		(dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(tc)		_TRANSCODER(tc, _MIPIA_HBP_COUNT, \
+					_MIPIB_HBP_COUNT)
 
-#define _MIPIA_HFP_COUNT			(VLV_DISPLAY_BASE + 0xb030)
-#define _MIPIB_HFP_COUNT			(VLV_DISPLAY_BASE + 0xb830)
-#define MIPI_HFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
+#define _MIPIA_HFP_COUNT		(dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIB_HFP_COUNT		(dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(tc)		_TRANSCODER(tc, _MIPIA_HFP_COUNT, \
+					_MIPIB_HFP_COUNT)
 
-#define _MIPIA_HACTIVE_AREA_COUNT		(VLV_DISPLAY_BASE + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT		(VLV_DISPLAY_BASE + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(pipe)	_PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+#define _MIPIA_HACTIVE_AREA_COUNT	(dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT	(dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(tc)	_TRANSCODER(tc, \
+			_MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
 
-#define _MIPIA_VSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIA_VSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT	(dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(tc)	_TRANSCODER(tc, \
+			_MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
 
-#define _MIPIA_VBP_COUNT			(VLV_DISPLAY_BASE + 0xb03c)
-#define _MIPIB_VBP_COUNT			(VLV_DISPLAY_BASE + 0xb83c)
-#define MIPI_VBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+#define _MIPIA_VBP_COUNT		(dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIB_VBP_COUNT		(dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(tc)		_TRANSCODER(tc, _MIPIA_VBP_COUNT, \
+					_MIPIB_VBP_COUNT)
 
-#define _MIPIA_VFP_COUNT			(VLV_DISPLAY_BASE + 0xb040)
-#define _MIPIB_VFP_COUNT			(VLV_DISPLAY_BASE + 0xb840)
-#define MIPI_VFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
+#define _MIPIA_VFP_COUNT		(dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIB_VFP_COUNT		(dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(tc)		_TRANSCODER(tc, _MIPIA_VFP_COUNT, \
+					_MIPIB_VFP_COUNT)
 
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT		(VLV_DISPLAY_BASE + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT		(VLV_DISPLAY_BASE + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe)	_PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT	(dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT	(dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(tc)	_TRANSCODER(tc,	\
+		_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+
 /* regs above are bits 15:0 */
 
-#define _MIPIA_DPI_CONTROL			(VLV_DISPLAY_BASE + 0xb048)
-#define _MIPIB_DPI_CONTROL			(VLV_DISPLAY_BASE + 0xb848)
-#define MIPI_DPI_CONTROL(pipe)		_PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL		(dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIB_DPI_CONTROL		(dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(tc)		_TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
+					_MIPIB_DPI_CONTROL)
 #define  DPI_LP_MODE					(1 << 6)
 #define  BACKLIGHT_OFF					(1 << 5)
 #define  BACKLIGHT_ON					(1 << 4)
@@ -6208,27 +6313,31 @@
 #define  TURN_ON					(1 << 1)
 #define  SHUTDOWN					(1 << 0)
 
-#define _MIPIA_DPI_DATA				(VLV_DISPLAY_BASE + 0xb04c)
-#define _MIPIB_DPI_DATA				(VLV_DISPLAY_BASE + 0xb84c)
-#define MIPI_DPI_DATA(pipe)		_PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define _MIPIA_DPI_DATA			(dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIB_DPI_DATA			(dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(tc)		_TRANSCODER(tc, _MIPIA_DPI_DATA, \
+					_MIPIB_DPI_DATA)
 #define  COMMAND_BYTE_SHIFT				0
 #define  COMMAND_BYTE_MASK				(0x3f << 0)
 
-#define _MIPIA_INIT_COUNT			(VLV_DISPLAY_BASE + 0xb050)
-#define _MIPIB_INIT_COUNT			(VLV_DISPLAY_BASE + 0xb850)
-#define MIPI_INIT_COUNT(pipe)		_PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define _MIPIA_INIT_COUNT		(dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIB_INIT_COUNT		(dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(tc)		_TRANSCODER(tc, _MIPIA_INIT_COUNT, \
+					_MIPIB_INIT_COUNT)
 #define  MASTER_INIT_TIMER_SHIFT			0
 #define  MASTER_INIT_TIMER_MASK				(0xffff << 0)
 
-#define _MIPIA_MAX_RETURN_PKT_SIZE		(VLV_DISPLAY_BASE + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE		(VLV_DISPLAY_BASE + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(pipe)	_PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE	(dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE	(dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(tc)	_TRANSCODER(tc, \
+			_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
 #define  MAX_RETURN_PKT_SIZE_SHIFT			0
 #define  MAX_RETURN_PKT_SIZE_MASK			(0x3ff << 0)
 
-#define _MIPIA_VIDEO_MODE_FORMAT		(VLV_DISPLAY_BASE + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT		(VLV_DISPLAY_BASE + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(pipe)	_PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT	(dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT	(dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(tc)	_TRANSCODER(tc, \
+			_MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
 #define  RANDOM_DPI_DISPLAY_RESOLUTION			(1 << 4)
 #define  DISABLE_VIDEO_BTA				(1 << 3)
 #define  IP_TG_CONFIG					(1 << 2)
@@ -6236,9 +6345,10 @@
 #define  VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS		(2 << 0)
 #define  VIDEO_MODE_BURST				(3 << 0)
 
-#define _MIPIA_EOT_DISABLE			(VLV_DISPLAY_BASE + 0xb05c)
-#define _MIPIB_EOT_DISABLE			(VLV_DISPLAY_BASE + 0xb85c)
-#define MIPI_EOT_DISABLE(pipe)		_PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE		(dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIB_EOT_DISABLE		(dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(tc)		_TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
+					_MIPIB_EOT_DISABLE)
 #define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 7)
 #define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 6)
 #define  LOW_CONTENTION_RECOVERY_DISABLE		(1 << 5)
@@ -6248,28 +6358,33 @@
 #define  CLOCKSTOP					(1 << 1)
 #define  EOT_DISABLE					(1 << 0)
 
-#define _MIPIA_LP_BYTECLK			(VLV_DISPLAY_BASE + 0xb060)
-#define _MIPIB_LP_BYTECLK			(VLV_DISPLAY_BASE + 0xb860)
-#define MIPI_LP_BYTECLK(pipe)		_PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK		(dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIB_LP_BYTECLK		(dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(tc)		_TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
+					_MIPIB_LP_BYTECLK)
 #define  LP_BYTECLK_SHIFT				0
 #define  LP_BYTECLK_MASK				(0xffff << 0)
 
 /* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA			(VLV_DISPLAY_BASE + 0xb064)
-#define _MIPIB_LP_GEN_DATA			(VLV_DISPLAY_BASE + 0xb864)
-#define MIPI_LP_GEN_DATA(pipe)		_PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIB_LP_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(tc)		_TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
+					_MIPIB_LP_GEN_DATA)
 
 /* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA			(VLV_DISPLAY_BASE + 0xb068)
-#define _MIPIB_HS_GEN_DATA			(VLV_DISPLAY_BASE + 0xb868)
-#define MIPI_HS_GEN_DATA(pipe)		_PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
+#define _MIPIA_HS_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIB_HS_GEN_DATA		(dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(tc)		_TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
+					_MIPIB_HS_GEN_DATA)
 
-#define _MIPIA_LP_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb86c)
-#define MIPI_LP_GEN_CTRL(pipe)		_PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb070)
-#define _MIPIB_HS_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb870)
-#define MIPI_HS_GEN_CTRL(pipe)		_PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define _MIPIA_LP_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(tc)		_TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
+					_MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIB_HS_GEN_CTRL		(dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(tc)		_TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
+					_MIPIB_HS_GEN_CTRL)
 #define  LONG_PACKET_WORD_COUNT_SHIFT			8
 #define  LONG_PACKET_WORD_COUNT_MASK			(0xffff << 8)
 #define  SHORT_PACKET_PARAM_SHIFT			8
@@ -6280,9 +6395,10 @@
 #define  DATA_TYPE_MASK					(0x3f << 0)
 /* data type values, see include/video/mipi_display.h */
 
-#define _MIPIA_GEN_FIFO_STAT			(VLV_DISPLAY_BASE + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT			(VLV_DISPLAY_BASE + 0xb874)
-#define MIPI_GEN_FIFO_STAT(pipe)	_PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT		(dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT		(dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(tc)	_TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
+					_MIPIB_GEN_FIFO_STAT)
 #define  DPI_FIFO_EMPTY					(1 << 28)
 #define  DBI_FIFO_EMPTY					(1 << 27)
 #define  LP_CTRL_FIFO_EMPTY				(1 << 26)
@@ -6298,16 +6414,18 @@
 #define  HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
 #define  HS_DATA_FIFO_FULL				(1 << 0)
 
-#define _MIPIA_HS_LS_DBI_ENABLE			(VLV_DISPLAY_BASE + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE			(VLV_DISPLAY_BASE + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(pipe)	_PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE		(dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE		(dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(tc)	_TRANSCODER(tc, \
+			_MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
 #define  DBI_HS_LP_MODE_MASK				(1 << 0)
 #define  DBI_LP_MODE					(1 << 0)
 #define  DBI_HS_MODE					(0 << 0)
 
-#define _MIPIA_DPHY_PARAM			(VLV_DISPLAY_BASE + 0xb080)
-#define _MIPIB_DPHY_PARAM			(VLV_DISPLAY_BASE + 0xb880)
-#define MIPI_DPHY_PARAM(pipe)		_PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM		(dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIB_DPHY_PARAM		(dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(tc)		_TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
+					_MIPIB_DPHY_PARAM)
 #define  EXIT_ZERO_COUNT_SHIFT				24
 #define  EXIT_ZERO_COUNT_MASK				(0x3f << 24)
 #define  TRAIL_COUNT_SHIFT				16
@@ -6318,34 +6436,41 @@
 #define  PREPARE_COUNT_MASK				(0x3f << 0)
 
 /* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL			(VLV_DISPLAY_BASE + 0xb084)
-#define _MIPIB_DBI_BW_CTRL			(VLV_DISPLAY_BASE + 0xb884)
-#define MIPI_DBI_BW_CTRL(pipe)		_PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
+#define _MIPIA_DBI_BW_CTRL		(dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIB_DBI_BW_CTRL		(dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(tc)		_TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
+					_MIPIB_DBI_BW_CTRL)
 
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT		(VLV_DISPLAY_BASE + 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT		(VLV_DISPLAY_BASE + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe)	_PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT		(dev_priv->mipi_mmio_base \
+							+ 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT		(dev_priv->mipi_mmio_base \
+							+ 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc)	_TRANSCODER(tc, \
+	_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
 #define  LP_HS_SSW_CNT_SHIFT				16
 #define  LP_HS_SSW_CNT_MASK				(0xffff << 16)
 #define  HS_LP_PWR_SW_CNT_SHIFT				0
 #define  HS_LP_PWR_SW_CNT_MASK				(0xffff << 0)
 
-#define _MIPIA_STOP_STATE_STALL			(VLV_DISPLAY_BASE + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL			(VLV_DISPLAY_BASE + 0xb88c)
-#define MIPI_STOP_STATE_STALL(pipe)	_PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL		(dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL		(dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(tc)	_TRANSCODER(tc, \
+			_MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
 #define  STOP_STATE_STALL_COUNTER_SHIFT			0
 #define  STOP_STATE_STALL_COUNTER_MASK			(0xff << 0)
 
-#define _MIPIA_INTR_STAT_REG_1			(VLV_DISPLAY_BASE + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1			(VLV_DISPLAY_BASE + 0xb890)
-#define MIPI_INTR_STAT_REG_1(pipe)	_PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1			(VLV_DISPLAY_BASE + 0xb094)
-#define _MIPIB_INTR_EN_REG_1			(VLV_DISPLAY_BASE + 0xb894)
-#define MIPI_INTR_EN_REG_1(pipe)	_PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1		(dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1		(dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(tc)	_TRANSCODER(tc, \
+				_MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1		(dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIB_INTR_EN_REG_1		(dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(tc)	_TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
+					_MIPIB_INTR_EN_REG_1)
 #define  RX_CONTENTION_DETECTED				(1 << 0)
 
 /* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL			(VLV_DISPLAY_BASE + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL		(dev_priv->mipi_mmio_base + 0xb100)
 #define  DBI_TYPEC_ENABLE				(1 << 31)
 #define  DBI_TYPEC_WIP					(1 << 30)
 #define  DBI_TYPEC_OPTION_SHIFT				28
@@ -6359,9 +6484,10 @@
 
 /* MIPI adapter registers */
 
-#define _MIPIA_CTRL				(VLV_DISPLAY_BASE + 0xb104)
-#define _MIPIB_CTRL				(VLV_DISPLAY_BASE + 0xb904)
-#define MIPI_CTRL(pipe)			_PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define _MIPIA_CTRL			(dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIB_CTRL			(dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(tc)			_TRANSCODER(tc, _MIPIA_CTRL, \
+					_MIPIB_CTRL)
 #define  ESCAPE_CLOCK_DIVIDER_SHIFT			5 /* A only */
 #define  ESCAPE_CLOCK_DIVIDER_MASK			(3 << 5)
 #define  ESCAPE_CLOCK_DIVIDER_1				(0 << 5)
@@ -6373,50 +6499,52 @@
 #define  READ_REQUEST_PRIORITY_HIGH			(3 << 3)
 #define  RGB_FLIP_TO_BGR				(1 << 2)
 
-#define _MIPIA_DATA_ADDRESS			(VLV_DISPLAY_BASE + 0xb108)
-#define _MIPIB_DATA_ADDRESS			(VLV_DISPLAY_BASE + 0xb908)
-#define MIPI_DATA_ADDRESS(pipe)		_PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS		(dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIB_DATA_ADDRESS		(dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(tc)		_TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
+					_MIPIB_DATA_ADDRESS)
 #define  DATA_MEM_ADDRESS_SHIFT				5
 #define  DATA_MEM_ADDRESS_MASK				(0x7ffffff << 5)
 #define  DATA_VALID					(1 << 0)
 
-#define _MIPIA_DATA_LENGTH			(VLV_DISPLAY_BASE + 0xb10c)
-#define _MIPIB_DATA_LENGTH			(VLV_DISPLAY_BASE + 0xb90c)
-#define MIPI_DATA_LENGTH(pipe)		_PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH		(dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIB_DATA_LENGTH		(dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(tc)		_TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
+					_MIPIB_DATA_LENGTH)
 #define  DATA_LENGTH_SHIFT				0
 #define  DATA_LENGTH_MASK				(0xfffff << 0)
 
-#define _MIPIA_COMMAND_ADDRESS			(VLV_DISPLAY_BASE + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS			(VLV_DISPLAY_BASE + 0xb910)
-#define MIPI_COMMAND_ADDRESS(pipe)	_PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS		(dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS		(dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(tc)	_TRANSCODER(tc, \
+				_MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
 #define  COMMAND_MEM_ADDRESS_SHIFT			5
 #define  COMMAND_MEM_ADDRESS_MASK			(0x7ffffff << 5)
 #define  AUTO_PWG_ENABLE				(1 << 2)
 #define  MEMORY_WRITE_DATA_FROM_PIPE_RENDERING		(1 << 1)
 #define  COMMAND_VALID					(1 << 0)
 
-#define _MIPIA_COMMAND_LENGTH			(VLV_DISPLAY_BASE + 0xb114)
-#define _MIPIB_COMMAND_LENGTH			(VLV_DISPLAY_BASE + 0xb914)
-#define MIPI_COMMAND_LENGTH(pipe)	_PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH		(dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIB_COMMAND_LENGTH		(dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(tc)	_TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
+					_MIPIB_COMMAND_LENGTH)
 #define  COMMAND_LENGTH_SHIFT(n)			(8 * (n)) /* n: 0...3 */
 #define  COMMAND_LENGTH_MASK(n)				(0xff << (8 * (n)))
 
-#define _MIPIA_READ_DATA_RETURN0		(VLV_DISPLAY_BASE + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0		(VLV_DISPLAY_BASE + 0xb918)
-#define MIPI_READ_DATA_RETURN(pipe, n) \
-	(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0	(dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0	(dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(tc, n) \
+	(_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+					+ 4 * (n)) /* n: 0...7 */
 
-#define _MIPIA_READ_DATA_VALID			(VLV_DISPLAY_BASE + 0xb138)
-#define _MIPIB_READ_DATA_VALID			(VLV_DISPLAY_BASE + 0xb938)
-#define MIPI_READ_DATA_VALID(pipe)	_PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID		(dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIB_READ_DATA_VALID		(dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(tc)	_TRANSCODER(tc, \
+				_MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
 #define  READ_DATA_VALID(n)				(1 << (n))
 
 /* For UMS only (deprecated): */
 #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
 #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
 
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 86ce39a..ae7fd8f 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -47,22 +47,45 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	/* On VLV, residency time is in CZ units rather than 1.28us */
+	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev)) {
-		u32 clkctl2;
+		u32 reg, czcount_30ns;
 
-		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
-			CLK_CTL2_CZCOUNT_30NS_SHIFT;
-		if (!clkctl2) {
-			WARN(!clkctl2, "bogus CZ count value");
+		if (IS_CHERRYVIEW(dev))
+			reg = CHV_CLK_CTL1;
+		else
+			reg = VLV_CLK_CTL2;
+
+		czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+
+		if (!czcount_30ns) {
+			WARN(!czcount_30ns, "bogus CZ count value");
 			ret = 0;
 			goto out;
 		}
-		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+
+		units = 0;
+		div = 1000000ULL;
+
+		if (IS_CHERRYVIEW(dev)) {
+			/* Special case for the 320 MHz CZ clock */
+			if (czcount_30ns == 1) {
+				div = 10000000ULL;
+				units = 3125ULL;
+			} else {
+				/* CHV reports one less than the actual count */
+				czcount_30ns += 1;
+			}
+		}
+
+		if (units == 0)
+			units = DIV_ROUND_UP_ULL(30ULL * bias,
+						 (u64)czcount_30ns);
+
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 			units <<= 8;
 
-		div = 1000000ULL * bias;
+		div = div * bias;
 	}
 
 	raw_time = I915_READ(reg) * units;
@@ -461,11 +484,20 @@
 	mutex_unlock(&dev->struct_mutex);
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz) {
-		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+		if (IS_VALLEYVIEW(dev))
+			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+		else
+			val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
 	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
-		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+		if (IS_VALLEYVIEW(dev))
+			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+		else
+			val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
 	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
-		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+		if (IS_VALLEYVIEW(dev))
+			val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+		else
+			val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
 	} else {
 		BUG();
 	}
@@ -486,6 +518,9 @@
 	&dev_attr_gt_cur_freq_mhz.attr,
 	&dev_attr_gt_max_freq_mhz.attr,
 	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_gt_RP0_freq_mhz.attr,
+	&dev_attr_gt_RP1_freq_mhz.attr,
+	&dev_attr_gt_RPn_freq_mhz.attr,
 	&dev_attr_vlv_rpe_freq_mhz.attr,
 	NULL,
 };
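
With the RP0/RP1/RPn attributes now in the common attribute list (and
Valleyview reporting them via vlv_gpu_freq()), the frequency caps can
be read from userspace on every platform. A minimal reader sketch,
assuming the attributes appear under the card's sysfs node (the path is
an assumption for illustration):

	#include <stdio.h>

	/* Returns the frequency in MHz, or -1 on error. */
	static int read_gt_freq_mhz(const char *attr)
	{
		char path[128];
		FILE *f;
		int mhz = -1;

		snprintf(path, sizeof(path), "/sys/class/drm/card0/%s", attr);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &mhz) != 1)
			mhz = -1;
		fclose(f);
		return mhz;
	}

	/* e.g. read_gt_freq_mhz("gt_RP0_freq_mhz") */
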
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 827498e..608ed30 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -336,11 +336,12 @@
 
 	dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
 	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+	dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
 	DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
 		      "active %s, min brightness %u, level %u\n",
 		      dev_priv->vbt.backlight.pwm_freq_hz,
 		      dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
-		      entry->min_brightness,
+		      dev_priv->vbt.backlight.min_brightness,
 		      backlight_data->level[panel_type]);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 5a045d3..2efaf8e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -137,6 +137,18 @@
 	pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
+static void hsw_crt_pre_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
+	I915_WRITE(SPLL_CTL,
+		   SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -194,6 +206,20 @@
 	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
 }
 
+
+static void hsw_crt_post_disable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	DRM_DEBUG_KMS("Disabling SPLL\n");
+	val = I915_READ(SPLL_CTL);
+	WARN_ON(!(val & SPLL_PLL_ENABLE));
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
 static void intel_enable_crt(struct intel_encoder *encoder)
 {
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
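
SPLL handling moves into the CRT encoder itself: hsw_crt_pre_enable()
turns the PLL on and hsw_crt_post_disable() turns it off, replacing the
refcounted spll_refcount bookkeeping that a later hunk removes from
intel_ddi.c. Since the analog port is the only SPLL consumer, a plain
enable/disable pair suffices and no sharing logic is needed.
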
@@ -289,8 +315,10 @@
 		pipe_config->pipe_bpp = 24;
 
 	/* FDI must always be 2.7 GHz */
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev)) {
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
 		pipe_config->port_clock = 135000 * 2;
+	}
 
 	return true;
 }
@@ -632,8 +660,6 @@
 	struct intel_load_detect_pipe tmp;
 	struct drm_modeset_acquire_ctx ctx;
 
-	intel_runtime_pm_get(dev_priv);
-
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
 		      connector->base.id, connector->name,
 		      force);
@@ -685,8 +711,6 @@
 
 out:
 	intel_display_power_put(dev_priv, power_domain);
-	intel_runtime_pm_put(dev_priv);
-
 	return status;
 }
 
@@ -860,6 +884,8 @@
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
+		crt->base.pre_enable = hsw_crt_pre_enable;
+		crt->base.post_disable = hsw_crt_post_disable;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -869,7 +895,7 @@
 
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	if (!I915_HAS_HOTPLUG(dev))
 		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b17b9c7..5db0b555 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -76,12 +76,12 @@
 	0x00FFFFFF, 0x00000012,		/* eDP parameters */
 	0x00EBAFFF, 0x00020011,
 	0x00C71FFF, 0x0006000F,
+	0x00AAAFFF, 0x000E000A,
 	0x00FFFFFF, 0x00020011,
 	0x00DB6FFF, 0x0005000F,
 	0x00BEEFFF, 0x000A000C,
 	0x00FFFFFF, 0x0005000F,
 	0x00DB6FFF, 0x000A000C,
-	0x00FFFFFF, 0x000A000C,
 	0x00FFFFFF, 0x00140006		/* HDMI parameters 800mV 0dB */
 };
 
@@ -89,12 +89,12 @@
 	0x00FFFFFF, 0x0007000E,		/* DP parameters */
 	0x00D75FFF, 0x000E000A,
 	0x00BEFFFF, 0x00140006,
+	0x80B2CFFF, 0x001B0002,
 	0x00FFFFFF, 0x000E000A,
 	0x00D75FFF, 0x00180004,
 	0x80CB2FFF, 0x001B0002,
 	0x00F7DFFF, 0x00180004,
 	0x80D75FFF, 0x001B0002,
-	0x80FFFFFF, 0x001B0002,
 	0x00FFFFFF, 0x00140006		/* HDMI parameters 800mV 0dB */
 };
 
@@ -116,7 +116,10 @@
 	struct drm_encoder *encoder = &intel_encoder->base;
 	int type = intel_encoder->type;
 
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+	if (type == INTEL_OUTPUT_DP_MST) {
+		struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
+		return intel_dig_port->port;
+	} else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
 	    type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
 		struct intel_digital_port *intel_dig_port =
 			enc_to_dig_port(encoder);
@@ -277,7 +280,8 @@
 	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
 
 	/* Configure Port Clock Select */
-	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
+	WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
 	/* Start the training iterating through available voltages and emphasis,
 	 * testing each value twice. */
@@ -364,6 +368,18 @@
 	DRM_ERROR("FDI link training failed!\n");
 }
 
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *intel_dig_port =
+		enc_to_dig_port(&encoder->base);
+
+	intel_dp->DP = intel_dig_port->saved_port_bits |
+		DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+	intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+
+}
+
 static struct intel_encoder *
 intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
 {
@@ -385,53 +401,6 @@
 	return ret;
 }
 
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t val;
-
-	switch (intel_crtc->ddi_pll_sel) {
-	case PORT_CLK_SEL_SPLL:
-		plls->spll_refcount--;
-		if (plls->spll_refcount == 0) {
-			DRM_DEBUG_KMS("Disabling SPLL\n");
-			val = I915_READ(SPLL_CTL);
-			WARN_ON(!(val & SPLL_PLL_ENABLE));
-			I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-			POSTING_READ(SPLL_CTL);
-		}
-		break;
-	case PORT_CLK_SEL_WRPLL1:
-		plls->wrpll1_refcount--;
-		if (plls->wrpll1_refcount == 0) {
-			DRM_DEBUG_KMS("Disabling WRPLL 1\n");
-			val = I915_READ(WRPLL_CTL1);
-			WARN_ON(!(val & WRPLL_PLL_ENABLE));
-			I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
-			POSTING_READ(WRPLL_CTL1);
-		}
-		break;
-	case PORT_CLK_SEL_WRPLL2:
-		plls->wrpll2_refcount--;
-		if (plls->wrpll2_refcount == 0) {
-			DRM_DEBUG_KMS("Disabling WRPLL 2\n");
-			val = I915_READ(WRPLL_CTL2);
-			WARN_ON(!(val & WRPLL_PLL_ENABLE));
-			I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
-			POSTING_READ(WRPLL_CTL2);
-		}
-		break;
-	}
-
-	WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
-	WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
-	WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
-
-	intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-}
-
 #define LC_FREQ 2700
 #define LC_FREQ_2K (LC_FREQ * 2000)
 
@@ -592,9 +561,9 @@
 	u32 wrpll;
 
 	wrpll = I915_READ(reg);
-	switch (wrpll & SPLL_PLL_REF_MASK) {
-	case SPLL_PLL_SSC:
-	case SPLL_PLL_NON_SSC:
+	switch (wrpll & WRPLL_PLL_REF_MASK) {
+	case WRPLL_PLL_SSC:
+	case WRPLL_PLL_NON_SSC:
 		/*
 		 * We could calculate spread here, but our checking
 		 * code only cares about 5% accuracy, and spread is a max of
@@ -602,7 +571,7 @@
 		 */
 		refclk = 135;
 		break;
-	case SPLL_PLL_LCPLL:
+	case WRPLL_PLL_LCPLL:
 		refclk = LC_FREQ;
 		break;
 	default:
@@ -618,15 +587,14 @@
 	return (refclk * n * 100) / (p * r);
 }
 
-static void intel_ddi_clock_get(struct intel_encoder *encoder,
-				struct intel_crtc_config *pipe_config)
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+			 struct intel_crtc_config *pipe_config)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-	enum port port = intel_ddi_get_encoder_port(encoder);
 	int link_clock = 0;
 	u32 val, pll;
 
-	val = I915_READ(PORT_CLK_SEL(port));
+	val = pipe_config->ddi_pll_sel;
 	switch (val & PORT_CLK_SEL_MASK) {
 	case PORT_CLK_SEL_LCPLL_810:
 		link_clock = 81000;
@@ -750,173 +718,37 @@
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 	int type = intel_encoder->type;
-	enum pipe pipe = intel_crtc->pipe;
 	int clock = intel_crtc->config.port_clock;
 
-	intel_ddi_put_crtc_pll(crtc);
+	intel_put_shared_dpll(intel_crtc);
 
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
-			break;
-		case DP_LINK_BW_2_7:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
-			break;
-		case DP_LINK_BW_5_4:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
-			break;
-		default:
-			DRM_ERROR("Link bandwidth %d unsupported\n",
-				  intel_dp->link_bw);
-			return false;
-		}
-
-	} else if (type == INTEL_OUTPUT_HDMI) {
-		uint32_t reg, val;
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_shared_dpll *pll;
+		uint32_t val;
 		unsigned p, n2, r2;
 
 		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
-		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+		val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
 		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 		      WRPLL_DIVIDER_POST(p);
 
-		if (val == I915_READ(WRPLL_CTL1)) {
-			DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL1;
-		} else if (val == I915_READ(WRPLL_CTL2)) {
-			DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL2;
-		} else if (plls->wrpll1_refcount == 0) {
-			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL1;
-		} else if (plls->wrpll2_refcount == 0) {
-			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL2;
-		} else {
-			DRM_ERROR("No WRPLLs available!\n");
+		intel_crtc->config.dpll_hw_state.wrpll = val;
+
+		pll = intel_get_shared_dpll(intel_crtc);
+		if (pll == NULL) {
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+					 pipe_name(intel_crtc->pipe));
 			return false;
 		}
 
-		DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
-			      clock, p, n2, r2);
-
-		if (reg == WRPLL_CTL1) {
-			plls->wrpll1_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
-		} else {
-			plls->wrpll2_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
-		}
-
-	} else if (type == INTEL_OUTPUT_ANALOG) {
-		if (plls->spll_refcount == 0) {
-			DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
-				      pipe_name(pipe));
-			plls->spll_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
-		} else {
-			DRM_ERROR("SPLL already in use\n");
-			return false;
-		}
-
-	} else {
-		WARN(1, "Invalid DDI encoder type %d\n", type);
-		return false;
+		intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
 	}
 
 	return true;
 }
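
The open-coded WRPLL reuse and refcounting removed below is what the new intel_get_shared_dpll()/intel_put_shared_dpll() calls above replace. A minimal standalone model of that sharing idea, using hypothetical types rather than the driver's real structures, might look like:

    #include <stdio.h>

    #define NUM_PLLS 2

    struct pll {
            unsigned hw_state;      /* stand-in for intel_dpll_hw_state */
            int refcount;
    };

    /* Return a PLL already programmed with @state, or a free one. */
    static struct pll *get_shared_pll(struct pll *pool, unsigned state)
    {
            int i;

            for (i = 0; i < NUM_PLLS; i++)  /* prefer an exact match */
                    if (pool[i].refcount && pool[i].hw_state == state) {
                            pool[i].refcount++;
                            return &pool[i];
                    }
            for (i = 0; i < NUM_PLLS; i++)  /* otherwise take an idle PLL */
                    if (pool[i].refcount == 0) {
                            pool[i].hw_state = state;
                            pool[i].refcount = 1;
                            return &pool[i];
                    }
            return NULL;                    /* all PLLs busy: modeset fails */
    }

    int main(void)
    {
            struct pll pool[NUM_PLLS] = { { 0, 0 } };
            struct pll *a = get_shared_pll(pool, 0x1234);
            struct pll *b = get_shared_pll(pool, 0x1234);

            /* two CRTCs with identical PLL state share one PLL */
            printf("shared: %d\n", a == b); /* prints: shared: 1 */
            return 0;
    }

Two CRTCs that compute identical dpll_hw_state thus end up holding references to the same physical WRPLL, which is what the HDMI path above relies on.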
 
-/*
- * To be called after intel_ddi_pll_select(). That one selects the PLL to be
- * used, this one actually enables the PLL.
- */
-void intel_ddi_pll_enable(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
-	int clock = crtc->config.port_clock;
-	uint32_t reg, cur_val, new_val;
-	int refcount;
-	const char *pll_name;
-	uint32_t enable_bit = (1 << 31);
-	unsigned int p, n2, r2;
-
-	BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
-	BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
-
-	switch (crtc->ddi_pll_sel) {
-	case PORT_CLK_SEL_LCPLL_2700:
-	case PORT_CLK_SEL_LCPLL_1350:
-	case PORT_CLK_SEL_LCPLL_810:
-		/*
-		 * LCPLL should always be enabled at this point of the mode set
-		 * sequence, so nothing to do.
-		 */
-		return;
-
-	case PORT_CLK_SEL_SPLL:
-		pll_name = "SPLL";
-		reg = SPLL_CTL;
-		refcount = plls->spll_refcount;
-		new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
-			  SPLL_PLL_SSC;
-		break;
-
-	case PORT_CLK_SEL_WRPLL1:
-	case PORT_CLK_SEL_WRPLL2:
-		if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
-			pll_name = "WRPLL1";
-			reg = WRPLL_CTL1;
-			refcount = plls->wrpll1_refcount;
-		} else {
-			pll_name = "WRPLL2";
-			reg = WRPLL_CTL2;
-			refcount = plls->wrpll2_refcount;
-		}
-
-		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-		new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
-			  WRPLL_DIVIDER_REFERENCE(r2) |
-			  WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
-
-		break;
-
-	case PORT_CLK_SEL_NONE:
-		WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
-		return;
-	default:
-		WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
-		return;
-	}
-
-	cur_val = I915_READ(reg);
-
-	WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
-	if (refcount == 1) {
-		WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
-		I915_WRITE(reg, new_val);
-		POSTING_READ(reg);
-		udelay(20);
-	} else {
-		WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
-	}
-}
-
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -926,8 +758,7 @@
 	int type = intel_encoder->type;
 	uint32_t temp;
 
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
 		temp = TRANS_MSA_SYNC_CLK;
 		switch (intel_crtc->config.pipe_bpp) {
 		case 18:
@@ -949,6 +780,21 @@
 	}
 }
 
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+	uint32_t temp;
+
+	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	if (state)
+		temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+	else
+		temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
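
The helper above is a plain read-modify-write of the transcoder's MST virtual-channel payload-allocation bit; later hunks call it with true from haswell_crtc_enable() once the link is running, and with false on the disable path. A hedged sketch of the same toggle pattern, with a hypothetical register layout:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define VC_PAYLOAD_ALLOC (1u << 8)  /* hypothetical bit position */

    /* Read-modify-write one control bit, as the helper above does. */
    static uint32_t set_vc_payload(uint32_t reg, bool enable)
    {
            if (enable)
                    reg |= VC_PAYLOAD_ALLOC;
            else
                    reg &= ~VC_PAYLOAD_ALLOC;
            return reg;
    }

    int main(void)
    {
            uint32_t reg = 0;

            reg = set_vc_payload(reg, true);   /* arm payload allocation */
            reg = set_vc_payload(reg, false);  /* disarm on disable */
            printf("%#x\n", reg);              /* prints 0 */
            return 0;
    }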
+
 void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -995,7 +841,9 @@
 			 * eDP when not using the panel fitter, and when not
 			 * using motion blur mitigation (which we don't
 			 * support). */
-			if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
+			if (IS_HASWELL(dev) &&
+			    (intel_crtc->config.pch_pfit.enabled ||
+			     intel_crtc->config.pch_pfit.force_thru))
 				temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
 			else
 				temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1026,7 +874,19 @@
 		   type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+		if (intel_dp->is_mst)
+			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+		else
+			temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+		temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+	} else if (type == INTEL_OUTPUT_DP_MST) {
+		struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
+
+		if (intel_dp->is_mst)
+			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+		else
+			temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
 		temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
 	} else {
@@ -1043,7 +903,7 @@
 	uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 	uint32_t val = I915_READ(reg);
 
-	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 	val |= TRANS_DDI_PORT_NONE;
 	I915_WRITE(reg, val);
 }
@@ -1082,8 +942,11 @@
 	case TRANS_DDI_MODE_SELECT_DP_SST:
 		if (type == DRM_MODE_CONNECTOR_eDP)
 			return true;
-	case TRANS_DDI_MODE_SELECT_DP_MST:
 		return (type == DRM_MODE_CONNECTOR_DisplayPort);
+	case TRANS_DDI_MODE_SELECT_DP_MST:
+		/* if the transcoder is in MST state then the
+		 * connector isn't connected */
+		return false;
 
 	case TRANS_DDI_MODE_SELECT_FDI:
 		return (type == DRM_MODE_CONNECTOR_VGA);
@@ -1135,6 +998,9 @@
 
 			if ((tmp & TRANS_DDI_PORT_MASK)
 			    == TRANS_DDI_SELECT_PORT(port)) {
+				if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
+					return false;
+
 				*pipe = i;
 				return true;
 			}
@@ -1146,76 +1012,6 @@
 	return false;
 }
 
-static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
-				       enum pipe pipe)
-{
-	uint32_t temp, ret;
-	enum port port = I915_MAX_PORTS;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
-	int i;
-
-	if (cpu_transcoder == TRANSCODER_EDP) {
-		port = PORT_A;
-	} else {
-		temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
-		temp &= TRANS_DDI_PORT_MASK;
-
-		for (i = PORT_B; i <= PORT_E; i++)
-			if (temp == TRANS_DDI_SELECT_PORT(i))
-				port = i;
-	}
-
-	if (port == I915_MAX_PORTS) {
-		WARN(1, "Pipe %c enabled on an unknown port\n",
-		     pipe_name(pipe));
-		ret = PORT_CLK_SEL_NONE;
-	} else {
-		ret = I915_READ(PORT_CLK_SEL(port));
-		DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
-			      "0x%08x\n", pipe_name(pipe), port_name(port),
-			      ret);
-	}
-
-	return ret;
-}
-
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-	struct intel_crtc *intel_crtc;
-
-	dev_priv->ddi_plls.spll_refcount = 0;
-	dev_priv->ddi_plls.wrpll1_refcount = 0;
-	dev_priv->ddi_plls.wrpll2_refcount = 0;
-
-	for_each_pipe(pipe) {
-		intel_crtc =
-			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
-		if (!intel_crtc->active) {
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-			continue;
-		}
-
-		intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
-								 pipe);
-
-		switch (intel_crtc->ddi_pll_sel) {
-		case PORT_CLK_SEL_SPLL:
-			dev_priv->ddi_plls.spll_refcount++;
-			break;
-		case PORT_CLK_SEL_WRPLL1:
-			dev_priv->ddi_plls.wrpll1_refcount++;
-			break;
-		case PORT_CLK_SEL_WRPLL2:
-			dev_priv->ddi_plls.wrpll2_refcount++;
-			break;
-		}
-	}
-}
-
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
@@ -1261,17 +1057,13 @@
 		intel_edp_panel_on(intel_dp);
 	}
 
-	WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
-	I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
+	WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+	I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-		struct intel_digital_port *intel_dig_port =
-			enc_to_dig_port(encoder);
 
-		intel_dp->DP = intel_dig_port->saved_port_bits |
-			       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
-		intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+		intel_ddi_init_dp_buf_reg(intel_encoder);
 
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 		intel_dp_start_link_train(intel_dp);
@@ -1418,10 +1210,60 @@
 	}
 }
 
+static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+			       struct intel_shared_dpll *pll)
+{
+	I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+	POSTING_READ(WRPLL_CTL(pll->id));
+	udelay(20);
+}
+
+static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+	POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				     struct intel_shared_dpll *pll,
+				     struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	hw_state->wrpll = val;
+
+	return val & WRPLL_PLL_ENABLE;
+}
+
+static const char * const hsw_ddi_pll_names[] = {
+	"WRPLL 1",
+	"WRPLL 2",
+};
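
intel_ddi_pll_init() below registers each WRPLL in dev_priv->shared_dplls with enable/disable/get_hw_state hooks, so the platform-neutral shared-DPLL code never has to know about WRPLL registers. A toy version of that ops-table registration, using hypothetical types rather than the kernel's:

    #include <stdio.h>

    struct dpll {
            const char *name;
            unsigned hw_state;
            void (*enable)(struct dpll *pll);
            void (*disable)(struct dpll *pll);
    };

    static void hsw_pll_enable(struct dpll *pll)
    {
            printf("enabling %s (state %#x)\n", pll->name, pll->hw_state);
    }

    static void hsw_pll_disable(struct dpll *pll)
    {
            printf("disabling %s\n", pll->name);
    }

    int main(void)
    {
            static const char * const names[] = { "WRPLL 1", "WRPLL 2" };
            struct dpll plls[2];
            int i;

            for (i = 0; i < 2; i++) {       /* mirrors intel_ddi_pll_init() */
                    plls[i].name = names[i];
                    plls[i].hw_state = 0;
                    plls[i].enable = hsw_pll_enable;
                    plls[i].disable = hsw_pll_disable;
            }

            /* Generic code only ever calls through the hooks. */
            plls[0].enable(&plls[0]);
            plls[0].disable(&plls[0]);
            return 0;
    }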
+
 void intel_ddi_pll_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val = I915_READ(LCPLL_CTL);
+	int i;
+
+	dev_priv->num_shared_dpll = 2;
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		dev_priv->shared_dplls[i].id = i;
+		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+		dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+		dev_priv->shared_dplls[i].get_hw_state =
+			hsw_ddi_pll_get_hw_state;
+	}
 
 	/* The LCPLL register should be turned on by the BIOS. For now let's
 	 * just check its state and print errors in case something is wrong.
@@ -1465,10 +1307,15 @@
 			intel_wait_ddi_buf_idle(dev_priv, port);
 	}
 
-	val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+	val = DP_TP_CTL_ENABLE |
 	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
-	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
-		val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+	if (intel_dp->is_mst)
+		val |= DP_TP_CTL_MODE_MST;
+	else {
+		val |= DP_TP_CTL_MODE_SST;
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+			val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+	}
 	I915_WRITE(DP_TP_CTL(port), val);
 	POSTING_READ(DP_TP_CTL(port));
 
@@ -1507,11 +1354,16 @@
 
 static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
-	int type = intel_encoder->type;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+	int type = intel_dig_port->base.type;
 
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
-		intel_dp_check_link_status(intel_dp);
+	if (type != INTEL_OUTPUT_DISPLAYPORT &&
+	    type != INTEL_OUTPUT_EDP &&
+	    type != INTEL_OUTPUT_UNKNOWN) {
+		return;
+	}
+
+	intel_dp_hot_plug(intel_encoder);
 }
 
 void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1663,15 +1515,13 @@
 	struct intel_digital_port *intel_dig_port;
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
-	struct intel_connector *hdmi_connector = NULL;
-	struct intel_connector *dp_connector = NULL;
 	bool init_hdmi, init_dp;
 
 	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
 		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
 	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
 	if (!init_dp && !init_hdmi) {
-		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
 			      port_name(port));
 		init_hdmi = true;
 		init_dp = true;
@@ -1701,20 +1551,28 @@
 					   DDI_A_4_LANES);
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
-	intel_encoder->crtc_mask =  (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
 	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
-	if (init_dp)
-		dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+	if (init_dp) {
+		if (!intel_ddi_init_dp_connector(intel_dig_port))
+			goto err;
+
+		intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+		dev_priv->hpd_irq_port[port] = intel_dig_port;
+	}
 
 	/* In theory we don't need the encoder->type check, but leave it just in
 	 * case we have some really bad VBTs... */
-	if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
-		hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
-
-	if (!dp_connector && !hdmi_connector) {
-		drm_encoder_cleanup(encoder);
-		kfree(intel_dig_port);
+	if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+		if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+			goto err;
 	}
+
+	return;
+
+err:
+	drm_encoder_cleanup(encoder);
+	kfree(intel_dig_port);
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0be855..99eb7ca 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -39,12 +39,45 @@
 #include "i915_trace.h"
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
 
-#define DIV_ROUND_CLOSEST_ULL(ll, d)	\
-	({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+/* Primary plane formats supported by all gen */
+#define COMMON_PRIMARY_FORMATS \
+	DRM_FORMAT_C8, \
+	DRM_FORMAT_RGB565, \
+	DRM_FORMAT_XRGB8888, \
+	DRM_FORMAT_ARGB8888
 
-static void intel_increase_pllclock(struct drm_crtc *crtc);
+/* Primary plane formats for gen <= 3 */
+static const uint32_t intel_primary_formats_gen2[] = {
+	COMMON_PRIMARY_FORMATS,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const uint32_t intel_primary_formats_gen4[] = {
+	COMMON_PRIMARY_FORMATS, \
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
+};
+
+/* Cursor formats */
+static const uint32_t intel_cursor_formats[] = {
+	DRM_FORMAT_ARGB8888,
+};
+
+#define DIV_ROUND_CLOSEST_ULL(ll, d)	\
+({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
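
DIV_ROUND_CLOSEST_ULL (moved here, not new) rounds an unsigned 64-bit division to the nearest integer by biasing the numerator with half the divisor before dividing; do_div() is the kernel's in-place 64-by-32 division helper. A userspace approximation for checking the rounding behaviour:

    #include <stdio.h>

    /* Userspace stand-in; the kernel's do_div() divides in place. */
    #define DIV_ROUND_CLOSEST_ULL(ll, d) \
            (((unsigned long long)(ll) + (d) / 2) / (d))

    int main(void)
    {
            /* 7/2 = 3.5 rounds up, 9/4 = 2.25 rounds down */
            printf("%llu\n", DIV_ROUND_CLOSEST_ULL(7, 2));  /* 4 */
            printf("%llu\n", DIV_ROUND_CLOSEST_ULL(9, 4));  /* 2 */
            return 0;
    }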
+
+static void intel_increase_pllclock(struct drm_device *dev,
+				    enum pipe pipe);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -68,6 +101,14 @@
 static void intel_set_pipe_csc(struct drm_crtc *crtc);
 static void vlv_prepare_pll(struct intel_crtc *crtc);
 
+static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
+{
+	if (!connector->mst_port)
+		return connector->encoder;
+	else
+		return &connector->mst_port->mst_encoders[pipe]->base;
+}
+
 typedef struct {
 	int	min, max;
 } intel_range_t;
@@ -1061,11 +1102,6 @@
 	bool cur_state;
 	struct intel_dpll_hw_state hw_state;
 
-	if (HAS_PCH_LPT(dev_priv->dev)) {
-		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
-		return;
-	}
-
 	if (WARN (!pll,
 		  "asserting DPLL %s with no DPLL\n", state_string(state)))
 		return;
@@ -1481,9 +1517,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_VALLEYVIEW(dev))
-		return;
-
 	if (IS_CHERRYVIEW(dev)) {
 		enum dpio_phy phy;
 		u32 val;
@@ -1505,26 +1538,6 @@
 			I915_WRITE(DISPLAY_PHY_CONTROL,
 				PHY_COM_LANE_RESET_DEASSERT(phy, val));
 		}
-
-	} else {
-		/*
-		 * If DPIO has already been reset, e.g. by BIOS, just skip all
-		 * this.
-		 */
-		if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
-			return;
-
-		/*
-		 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
-		 * Need to assert and de-assert PHY SB reset by gating the
-		 * common lane power, then un-gating it.
-		 * Simply ungating isn't enough to reset the PHY enough to get
-		 * ports and lanes running.
-		 */
-		__vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
-				     false);
-		__vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
-				     true);
 	}
 }
 
@@ -1712,6 +1725,17 @@
 	val &= ~DPIO_DCLKP_EN;
 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
 
+	/* disable left/right clock distribution */
+	if (pipe != PIPE_B) {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+	} else {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+	}
+
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1749,6 +1773,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
+	if (WARN_ON(pll == NULL))
+		return;
+
 	WARN_ON(!pll->refcount);
 	if (pll->active == 0) {
 		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
@@ -1790,12 +1817,14 @@
 	}
 	WARN_ON(pll->on);
 
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
 	DRM_DEBUG_KMS("enabling %s\n", pll->name);
 	pll->enable(dev_priv, pll);
 	pll->on = true;
 }
 
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1826,6 +1855,8 @@
 	DRM_DEBUG_KMS("disabling %s\n", pll->name);
 	pll->disable(dev_priv, pll);
 	pll->on = false;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 }
 
 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -2172,6 +2203,8 @@
 	u32 alignment;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	switch (obj->tiling_mode) {
 	case I915_TILING_NONE:
 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2228,6 +2261,8 @@
 
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 {
+	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+
 	i915_gem_object_unpin_fence(obj);
 	i915_gem_object_unpin_from_display_plane(obj);
 }
@@ -2314,6 +2349,7 @@
 		goto out_unref_obj;
 	}
 
+	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
@@ -2331,7 +2367,7 @@
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_crtc *c;
 	struct intel_crtc *i;
-	struct intel_framebuffer *fb;
+	struct drm_i915_gem_object *obj;
 
 	if (!intel_crtc->base.primary->fb)
 		return;
@@ -2352,13 +2388,17 @@
 		if (c == &intel_crtc->base)
 			continue;
 
-		if (!i->active || !c->primary->fb)
+		if (!i->active)
 			continue;
 
-		fb = to_intel_framebuffer(c->primary->fb);
-		if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+		obj = intel_fb_obj(c->primary->fb);
+		if (obj == NULL)
+			continue;
+
+		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
 			drm_framebuffer_reference(c->primary->fb);
 			intel_crtc->base.primary->fb = c->primary->fb;
+			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 			break;
 		}
 	}
@@ -2371,16 +2411,12 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
 	u32 reg;
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
 	reg = DSPCNTR(plane);
 	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
@@ -2461,16 +2497,12 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
 	u32 reg;
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
 	reg = DSPCNTR(plane);
 	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
@@ -2546,7 +2578,7 @@
 
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
-	intel_increase_pllclock(crtc);
+	intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -2601,7 +2633,7 @@
 static int
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
-	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	bool was_interruptible = dev_priv->mm.interruptible;
 	int ret;
@@ -2647,7 +2679,10 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_framebuffer *old_fb;
+	enum pipe pipe = intel_crtc->pipe;
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
 	int ret;
 
 	if (intel_crtc_has_pending_flip(crtc)) {
@@ -2669,9 +2704,10 @@
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev,
-					 to_intel_framebuffer(fb)->obj,
-					 NULL);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+	if (ret == 0)
+		i915_gem_track_fb(old_obj, obj,
+				  INTEL_FRONTBUFFER_PRIMARY(pipe));
 	mutex_unlock(&dev->struct_mutex);
 	if (ret != 0) {
 		DRM_ERROR("pin & fence failed\n");
@@ -2711,7 +2747,9 @@
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
-	old_fb = crtc->primary->fb;
+	if (intel_crtc->active)
+		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
 	crtc->primary->fb = fb;
 	crtc->x = x;
 	crtc->y = y;
@@ -2720,13 +2758,12 @@
 		if (intel_crtc->active && old_fb != fb)
 			intel_wait_for_vblank(dev, intel_crtc->pipe);
 		mutex_lock(&dev->struct_mutex);
-		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+		intel_unpin_fb_obj(old_obj);
 		mutex_unlock(&dev->struct_mutex);
 	}
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
-	intel_edp_psr_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -3587,7 +3624,7 @@
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-static void intel_put_shared_dpll(struct intel_crtc *crtc)
+void intel_put_shared_dpll(struct intel_crtc *crtc)
 {
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
@@ -3607,7 +3644,7 @@
 	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
 }
 
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3818,7 +3855,7 @@
 	}
 
 	/* use legacy palette for Ironlake */
-	if (HAS_PCH_SPLIT(dev))
+	if (!HAS_GMCH_DISPLAY(dev))
 		palreg = LGC_PALETTE(pipe);
 
 	/* Workaround : Do not read or write the pipe palette/gamma data while
@@ -3860,30 +3897,6 @@
 	 */
 }
 
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-	u32 cntl = I915_READ(CURCNTR(pipe));
-
-	if ((cntl & CURSOR_MODE) == 0) {
-		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
-		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
-		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
-		intel_wait_for_vblank(dev_priv->dev, pipe);
-		I915_WRITE(CURCNTR(pipe), cntl);
-		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
-		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
-	}
-}
-
 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3892,11 +3905,10 @@
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 
+	drm_vblank_on(dev, pipe);
+
 	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
-	/* The fixup needs to happen before cursor is enabled */
-	if (IS_G4X(dev))
-		g4x_fixup_plane(dev_priv, pipe);
 	intel_crtc_update_cursor(crtc, true);
 	intel_crtc_dpms_overlay(intel_crtc, true);
 
@@ -3904,8 +3916,14 @@
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
-	intel_edp_psr_update(dev);
 	mutex_unlock(&dev->struct_mutex);
+
+	/*
+	 * FIXME: Once we grow proper nuclear flip support out of this we need
+	 * to compute the mask of flip planes precisely. For the time being
+	 * consider this a flip from a NULL plane.
+	 */
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
 static void intel_crtc_disable_planes(struct drm_crtc *crtc)
@@ -3917,7 +3935,6 @@
 	int plane = intel_crtc->plane;
 
 	intel_crtc_wait_for_pending_flips(crtc);
-	drm_crtc_vblank_off(crtc);
 
 	if (dev_priv->fbc.plane == plane)
 		intel_disable_fbc(dev);
@@ -3928,6 +3945,15 @@
 	intel_crtc_update_cursor(crtc, false);
 	intel_disable_planes(crtc);
 	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+
+	/*
+	 * FIXME: Once we grow proper nuclear flip support out of this we need
+	 * to compute the mask of flip planes precisely. For the time being
+	 * consider this a flip to a NULL plane.
+	 */
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+
+	drm_vblank_off(dev, pipe);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4006,8 +4032,6 @@
 		cpt_verify_modeset(dev, intel_crtc->pipe);
 
 	intel_crtc_enable_planes(crtc);
-
-	drm_crtc_vblank_on(crtc);
 }
 
 /* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4059,6 +4083,9 @@
 	if (intel_crtc->active)
 		return;
 
+	if (intel_crtc_to_shared_dpll(intel_crtc))
+		intel_enable_shared_dpll(intel_crtc);
+
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
@@ -4083,16 +4110,15 @@
 	intel_crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
-	if (intel_crtc->config.has_pch_encoder)
-		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
-
-	if (intel_crtc->config.has_pch_encoder)
-		dev_priv->display.fdi_link_train(crtc);
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
 
+	if (intel_crtc->config.has_pch_encoder) {
+		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+		dev_priv->display.fdi_link_train(crtc);
+	}
+
 	intel_ddi_enable_pipe_clock(intel_crtc);
 
 	ironlake_pfit_enable(intel_crtc);
@@ -4112,6 +4138,9 @@
 	if (intel_crtc->config.has_pch_encoder)
 		lpt_pch_enable(crtc);
 
+	if (intel_crtc->config.dp_encoder_is_mst)
+		intel_ddi_set_vc_payload_alloc(crtc, true);
+
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		encoder->enable(encoder);
 		intel_opregion_notify_encoder(encoder, true);
@@ -4121,8 +4150,6 @@
 	 * to change the workaround. */
 	haswell_mode_set_planes_workaround(intel_crtc);
 	intel_crtc_enable_planes(crtc);
-
-	drm_crtc_vblank_on(crtc);
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4162,6 +4189,9 @@
 
 	intel_disable_pipe(dev_priv, pipe);
 
+	if (intel_crtc->config.dp_encoder_is_mst)
+		intel_ddi_set_vc_payload_alloc(crtc, false);
+
 	ironlake_pfit_disable(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4200,7 +4230,6 @@
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
-	intel_edp_psr_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -4233,23 +4262,25 @@
 
 	intel_ddi_disable_pipe_clock(intel_crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_disable)
-			encoder->post_disable(encoder);
-
 	if (intel_crtc->config.has_pch_encoder) {
 		lpt_disable_pch_transcoder(dev_priv);
 		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 		intel_ddi_fdi_disable(crtc);
 	}
 
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
-	intel_edp_psr_update(dev);
 	mutex_unlock(&dev->struct_mutex);
+
+	if (intel_crtc_to_shared_dpll(intel_crtc))
+		intel_disable_shared_dpll(intel_crtc);
 }
 
 static void ironlake_crtc_off(struct drm_crtc *crtc)
@@ -4258,10 +4289,6 @@
 	intel_put_shared_dpll(intel_crtc);
 }
 
-static void haswell_crtc_off(struct drm_crtc *crtc)
-{
-	intel_ddi_put_crtc_pll(crtc);
-}
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
@@ -4287,6 +4314,23 @@
 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
 }
 
+static enum intel_display_power_domain port_to_power_domain(enum port port)
+{
+	switch (port) {
+	case PORT_A:
+		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+	case PORT_B:
+		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+	case PORT_C:
+		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+	case PORT_D:
+		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+	default:
+		WARN_ON_ONCE(1);
+		return POWER_DOMAIN_PORT_OTHER;
+	}
+}
+
 #define for_each_power_domain(domain, mask)				\
 	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
 		if ((1 << (domain)) & (mask))
@@ -4305,19 +4349,10 @@
 	case INTEL_OUTPUT_HDMI:
 	case INTEL_OUTPUT_EDP:
 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-		switch (intel_dig_port->port) {
-		case PORT_A:
-			return POWER_DOMAIN_PORT_DDI_A_4_LANES;
-		case PORT_B:
-			return POWER_DOMAIN_PORT_DDI_B_4_LANES;
-		case PORT_C:
-			return POWER_DOMAIN_PORT_DDI_C_4_LANES;
-		case PORT_D:
-			return POWER_DOMAIN_PORT_DDI_D_4_LANES;
-		default:
-			WARN_ON_ONCE(1);
-			return POWER_DOMAIN_PORT_OTHER;
-		}
+		return port_to_power_domain(intel_dig_port->port);
+	case INTEL_OUTPUT_DP_MST:
+		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+		return port_to_power_domain(intel_dig_port->port);
 	case INTEL_OUTPUT_ANALOG:
 		return POWER_DOMAIN_PORT_CRT;
 	case INTEL_OUTPUT_DSI:
@@ -4333,7 +4368,6 @@
 	struct intel_encoder *intel_encoder;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
 	unsigned long mask;
 	enum transcoder transcoder;
 
@@ -4341,7 +4375,8 @@
 
 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
-	if (pfit_enabled)
+	if (intel_crtc->config.pch_pfit.enabled ||
+	    intel_crtc->config.pch_pfit.force_thru)
 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4398,7 +4433,8 @@
 	intel_display_set_init_power(dev_priv, false);
 }
 
-int valleyview_get_vco(struct drm_i915_private *dev_priv)
+/* returns HPLL frequency in kHz */
+static int valleyview_get_vco(struct drm_i915_private *dev_priv)
 {
 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
 
@@ -4408,7 +4444,23 @@
 		CCK_FUSE_HPLL_FREQ_MASK;
 	mutex_unlock(&dev_priv->dpio_lock);
 
-	return vco_freq[hpll_freq];
+	return vco_freq[hpll_freq] * 1000;
+}
+
+static void vlv_update_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+			 dev_priv->vlv_cdclk_freq);
+
+	/*
+	 * Program the gmbus_freq based on the cdclk frequency.
+	 * BSpec erroneously claims we should aim for 4MHz, but
+	 * in fact 1MHz is the correct frequency.
+	 */
+	I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
 }
 
 /* Adjust CDclk dividers to allow high res or save power if possible */
@@ -4417,12 +4469,11 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, cmd;
 
-	WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
-	dev_priv->vlv_cdclk_freq = cdclk;
+	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
 
-	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
 		cmd = 2;
-	else if (cdclk == 266)
+	else if (cdclk == 266667)
 		cmd = 1;
 	else
 		cmd = 0;
@@ -4439,18 +4490,23 @@
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	if (cdclk == 400) {
+	if (cdclk == 400000) {
 		u32 divider, vco;
 
 		vco = valleyview_get_vco(dev_priv);
-		divider = ((vco << 1) / cdclk) - 1;
+		divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
 
 		mutex_lock(&dev_priv->dpio_lock);
 		/* adjust cdclk divider */
 		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
-		val &= ~0xf;
+		val &= ~DISPLAY_FREQUENCY_VALUES;
 		val |= divider;
 		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+			     50))
+			DRM_ERROR("timed out waiting for CDclk change\n");
 		mutex_unlock(&dev_priv->dpio_lock);
 	}
 
@@ -4463,54 +4519,43 @@
 	 * For high bandwidth configs, we set a higher latency in the bunit
 	 * so that the core display fetch happens in time to avoid underruns.
 	 */
-	if (cdclk == 400)
+	if (cdclk == 400000)
 		val |= 4500 / 250; /* 4.5 usec */
 	else
 		val |= 3000 / 250; /* 3.0 usec */
 	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
 	mutex_unlock(&dev_priv->dpio_lock);
 
-	/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
-	intel_i2c_reset(dev);
-}
-
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
-{
-	int cur_cdclk, vco;
-	int divider;
-
-	vco = valleyview_get_vco(dev_priv);
-
-	mutex_lock(&dev_priv->dpio_lock);
-	divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
-	mutex_unlock(&dev_priv->dpio_lock);
-
-	divider &= 0xf;
-
-	cur_cdclk = (vco << 1) / (divider + 1);
-
-	return cur_cdclk;
+	vlv_update_cdclk(dev);
 }
 
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
 				 int max_pixclk)
 {
+	int vco = valleyview_get_vco(dev_priv);
+	int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+
 	/*
 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
 	 *   200MHz
 	 *   267MHz
-	 *   320MHz
+	 *   320/333MHz (depends on HPLL freq)
 	 *   400MHz
 	 * So we check to see whether we're above 90% of the lower bin and
 	 * adjust if needed.
+	 *
+	 * We seem to get an unstable or solid color picture at 200MHz.
+	 * Not sure what's wrong. For now use 200MHz only when all pipes
+	 * are off.
 	 */
-	if (max_pixclk > 288000) {
-		return 400;
-	} else if (max_pixclk > 240000) {
-		return 320;
-	} else
-		return 266;
-	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
+	if (max_pixclk > freq_320 * 9 / 10)
+		return 400000;
+	else if (max_pixclk > 266667 * 9 / 10)
+		return freq_320;
+	else if (max_pixclk > 0)
+		return 266667;
+	else
+		return 200000;
 }
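
To make the 90% rule above concrete with freq_320 = 320000: anything above 288000 kHz needs the 400 MHz bin, anything above 240000 kHz (90% of 266667) needs the 320/333 MHz bin, any other active configuration gets 266667, and only the all-pipes-off case drops to 200000. A standalone check of those thresholds (a sketch assuming freq_320 fixed at 320000):

    #include <stdio.h>

    static int calc_cdclk(int max_pixclk, int freq_320)
    {
            if (max_pixclk > freq_320 * 9 / 10)
                    return 400000;
            else if (max_pixclk > 266667 * 9 / 10)
                    return freq_320;
            else if (max_pixclk > 0)
                    return 266667;
            else
                    return 200000;  /* all pipes off */
    }

    int main(void)
    {
            printf("%d\n", calc_cdclk(148500, 320000)); /* 1080p: 266667 */
            printf("%d\n", calc_cdclk(297000, 320000)); /* 4K@30: 400000 */
            printf("%d\n", calc_cdclk(0, 320000));      /* idle:  200000 */
            return 0;
    }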
 
 /* compute the max pixel clock for new configuration */
@@ -4633,8 +4678,6 @@
 
 	intel_crtc_enable_planes(crtc);
 
-	drm_crtc_vblank_on(crtc);
-
 	/* Underruns don't raise interrupts, so check manually. */
 	i9xx_check_fifo_underruns(dev);
 }
@@ -4727,8 +4770,6 @@
 	if (IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
 
-	drm_crtc_vblank_on(crtc);
-
 	/* Underruns don't raise interrupts, so check manually. */
 	i9xx_check_fifo_underruns(dev);
 }
@@ -4768,6 +4809,16 @@
 	if (IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 
+	/*
+	 * Vblank time updates from the shadow to live plane control register
+	 * are blocked if the memory self-refresh mode is active at that
+	 * moment. So to make sure the plane gets truly disabled, first
+	 * disable the self-refresh mode. The self-refresh enable bit in turn
+	 * will be checked/applied by the HW only at the next frame start
+	 * event which is after the vblank start event, so we need to have a
+	 * wait-for-vblank between disabling the plane and the pipe.
+	 */
+	intel_set_memory_cxsr(dev_priv, false);
 	intel_crtc_disable_planes(crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4776,9 +4827,10 @@
 	/*
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
 	 * wait for planes to fully turn off before disabling the pipe.
+	 * We also need to wait on all gmch platforms because of the
+	 * self-refresh mode constraint explained above.
 	 */
-	if (IS_GEN2(dev))
-		intel_wait_for_vblank(dev, pipe);
+	intel_wait_for_vblank(dev, pipe);
 
 	intel_disable_pipe(dev_priv, pipe);
 
@@ -4805,7 +4857,6 @@
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
-	intel_edp_psr_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -4843,23 +4894,49 @@
 	}
 }
 
+/* Master function to enable/disable CRTC and corresponding power wells */
+void intel_crtc_control(struct drm_crtc *crtc, bool enable)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum intel_display_power_domain domain;
+	unsigned long domains;
+
+	if (enable) {
+		if (!intel_crtc->active) {
+			domains = get_crtc_power_domains(crtc);
+			for_each_power_domain(domain, domains)
+				intel_display_power_get(dev_priv, domain);
+			intel_crtc->enabled_power_domains = domains;
+
+			dev_priv->display.crtc_enable(crtc);
+		}
+	} else {
+		if (intel_crtc->active) {
+			dev_priv->display.crtc_disable(crtc);
+
+			domains = intel_crtc->enabled_power_domains;
+			for_each_power_domain(domain, domains)
+				intel_display_power_put(dev_priv, domain);
+			intel_crtc->enabled_power_domains = 0;
+		}
+	}
+}
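
intel_crtc_control() pairs every CRTC enable with taking the power domains that CRTC needs, and every disable with releasing exactly the set remembered in enabled_power_domains, so an off CRTC never pins a power well. A toy model of that get-on-enable/put-on-disable bookkeeping (simplified, not the driver's types):

    #include <stdio.h>

    static unsigned long enabled_power_domains;     /* per-CRTC in the driver */
    static int domain_refs[8];

    static void power_get(int d) { domain_refs[d]++; }
    static void power_put(int d) { domain_refs[d]--; }

    static void crtc_control(int *active, unsigned long needed, int enable)
    {
            int d;

            if (enable && !*active) {
                    for (d = 0; d < 8; d++)
                            if (needed & (1ul << d))
                                    power_get(d);
                    enabled_power_domains = needed; /* remember for disable */
                    *active = 1;
            } else if (!enable && *active) {
                    for (d = 0; d < 8; d++)
                            if (enabled_power_domains & (1ul << d))
                                    power_put(d);
                    enabled_power_domains = 0;
                    *active = 0;
            }
    }

    int main(void)
    {
            int active = 0;

            crtc_control(&active, 0x5, 1);  /* enable: domains 0 and 2 held */
            crtc_control(&active, 0x5, 0);  /* disable: both released */
            printf("%d %d\n", domain_refs[0], domain_refs[2]);  /* 0 0 */
            return 0;
    }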
+
 /**
  * Sets the power management mode of the pipe and plane.
  */
 void intel_crtc_update_dpms(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
 	bool enable = false;
 
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
 		enable |= intel_encoder->connectors_active;
 
-	if (enable)
-		dev_priv->display.crtc_enable(crtc);
-	else
-		dev_priv->display.crtc_disable(crtc);
+	intel_crtc_control(crtc, enable);
 
 	intel_crtc_update_sarea(crtc, enable);
 }
@@ -4869,6 +4946,8 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_connector *connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
 	/* crtc should still be enabled when we disable it. */
 	WARN_ON(!crtc->enabled);
@@ -4877,13 +4956,11 @@
 	intel_crtc_update_sarea(crtc, false);
 	dev_priv->display.off(crtc);
 
-	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
-	assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
-	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
-
 	if (crtc->primary->fb) {
 		mutex_lock(&dev->struct_mutex);
-		intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
+		intel_unpin_fb_obj(old_obj);
+		i915_gem_track_fb(old_obj, NULL,
+				  INTEL_FRONTBUFFER_PRIMARY(pipe));
 		mutex_unlock(&dev->struct_mutex);
 		crtc->primary->fb = NULL;
 	}
@@ -4939,24 +5016,31 @@
 			      connector->base.base.id,
 			      connector->base.name);
 
+		/* there is no real hw state for MST connectors */
+		if (connector->mst_port)
+			return;
+
 		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
 		     "wrong connector dpms state\n");
 		WARN(connector->base.encoder != &encoder->base,
 		     "active connector not linked to encoder\n");
-		WARN(!encoder->connectors_active,
-		     "encoder->connectors_active not set\n");
 
-		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
-		WARN(!encoder_enabled, "encoder not enabled\n");
-		if (WARN_ON(!encoder->base.crtc))
-			return;
+		if (encoder) {
+			WARN(!encoder->connectors_active,
+			     "encoder->connectors_active not set\n");
 
-		crtc = encoder->base.crtc;
+			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+			WARN(!encoder_enabled, "encoder not enabled\n");
+			if (WARN_ON(!encoder->base.crtc))
+				return;
 
-		WARN(!crtc->enabled, "crtc not enabled\n");
-		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
-		WARN(pipe != to_intel_crtc(crtc)->pipe,
-		     "encoder active on the wrong pipe\n");
+			crtc = encoder->base.crtc;
+
+			WARN(!crtc->enabled, "crtc not enabled\n");
+			WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+			WARN(pipe != to_intel_crtc(crtc)->pipe,
+			     "encoder active on the wrong pipe\n");
+		}
 	}
 }
 
@@ -5161,9 +5245,11 @@
 	if (HAS_IPS(dev))
 		hsw_compute_ips_config(crtc, pipe_config);
 
-	/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
-	 * clock survives for now. */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+	/*
+	 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
+	 * old clock survives for now.
+	 */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
 		pipe_config->shared_dpll = crtc->config.shared_dpll;
 
 	if (pipe_config->has_pch_encoder)
@@ -5174,7 +5260,22 @@
 
 static int valleyview_get_display_clock_speed(struct drm_device *dev)
 {
-	return 400000; /* FIXME */
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int vco = valleyview_get_vco(dev_priv);
+	u32 val;
+	int divider;
+
+	mutex_lock(&dev_priv->dpio_lock);
+	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	divider = val & DISPLAY_FREQUENCY_VALUES;
+
+	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
+	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+	     "cdclk change in progress\n");
+
+	return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
 }
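
The readback inverts the divider programmed by valleyview_set_cdclk(): cdclk = 2 * vco / (divider + 1), with vco in kHz from the HPLL fuse table. For instance, an 800 MHz HPLL and a stored divider of 4 give 320000 kHz. As a quick standalone check:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            int vco = 800000;   /* kHz, from the HPLL fuse table */
            int divider = 4;    /* CCK_DISPLAY_CLOCK_CONTROL field */

            printf("%d kHz\n", DIV_ROUND_CLOSEST(vco << 1, divider + 1));
            return 0;           /* prints 320000 kHz */
    }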
 
 static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -6125,8 +6226,8 @@
 	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
 					    plane_config->tiled);
 
-	plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
-				   aligned_height, PAGE_SIZE);
+	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+					aligned_height);
 
 	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe, plane, crtc->base.primary->fb->width,
@@ -7145,8 +7246,8 @@
 	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
 					    plane_config->tiled);
 
-	plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
-				   aligned_height, PAGE_SIZE);
+	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+					aligned_height);
 
 	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe, plane, crtc->base.primary->fb->width,
@@ -7163,6 +7264,10 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
+	if (!intel_display_power_enabled(dev_priv,
+					 POWER_DOMAIN_PIPE(crtc->pipe)))
+		return false;
+
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
@@ -7237,7 +7342,6 @@
 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 	struct intel_crtc *crtc;
 
 	for_each_intel_crtc(dev, crtc)
@@ -7245,14 +7349,15 @@
 		     pipe_name(crtc->pipe));
 
 	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
-	WARN(plls->spll_refcount, "SPLL enabled\n");
-	WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
-	WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
 	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
 	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
 	     "CPU PWM1 enabled\n");
-	WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
-	     "CPU PWM2 enabled\n");
+	if (IS_HASWELL(dev))
+		WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+		     "CPU PWM2 enabled\n");
 	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
 	     "PCH PWM1 enabled\n");
 	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
@@ -7265,7 +7370,17 @@
 	 * gen-specific and since we only disable LCPLL after we fully disable
 	 * the interrupts, the check below should be enough.
 	 */
-	WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+	WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (IS_HASWELL(dev))
+		return I915_READ(D_COMP_HSW);
+	else
+		return I915_READ(D_COMP_BDW);
 }
 
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
@@ -7276,12 +7391,12 @@
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 					    val))
-			DRM_ERROR("Failed to disable D_COMP\n");
+			DRM_ERROR("Failed to write to D_COMP\n");
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	} else {
-		I915_WRITE(D_COMP, val);
+		I915_WRITE(D_COMP_BDW, val);
+		POSTING_READ(D_COMP_BDW);
 	}
-	POSTING_READ(D_COMP);
 }
 
 /*
@@ -7319,12 +7434,13 @@
 	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
 		DRM_ERROR("LCPLL still locked\n");
 
-	val = I915_READ(D_COMP);
+	val = hsw_read_dcomp(dev_priv);
 	val |= D_COMP_COMP_DISABLE;
 	hsw_write_dcomp(dev_priv, val);
 	ndelay(100);
 
-	if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
+		     1))
 		DRM_ERROR("D_COMP RCOMP still in progress\n");
 
 	if (allow_power_down) {
@@ -7373,7 +7489,7 @@
 		POSTING_READ(LCPLL_CTL);
 	}
 
-	val = I915_READ(D_COMP);
+	val = hsw_read_dcomp(dev_priv);
 	val |= D_COMP_COMP_FORCE;
 	val &= ~D_COMP_COMP_DISABLE;
 	hsw_write_dcomp(dev_priv, val);
@@ -7479,13 +7595,59 @@
 
 	if (!intel_ddi_pll_select(intel_crtc))
 		return -EINVAL;
-	intel_ddi_pll_enable(intel_crtc);
 
 	intel_crtc->lowfreq_avail = false;
 
 	return 0;
 }
 
+static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
+				       struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll;
+	enum port port;
+	uint32_t tmp;
+
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+
+	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+	switch (pipe_config->ddi_pll_sel) {
+	case PORT_CLK_SEL_WRPLL1:
+		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+		break;
+	case PORT_CLK_SEL_WRPLL2:
+		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+		break;
+	}
+
+	if (pipe_config->shared_dpll >= 0) {
+		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+		WARN_ON(!pll->get_hw_state(dev_priv, pll,
+					   &pipe_config->dpll_hw_state));
+	}
+
+	/*
+	 * Haswell has only FDI/PCH transcoder A, which is connected to
+	 * DDI E. So just check whether this pipe is wired to DDI E and whether
+	 * the PCH transcoder is on.
+	 */
+	if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+		pipe_config->has_pch_encoder = true;
+
+		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+	}
+}
+
 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 				    struct intel_crtc_config *pipe_config)
 {
@@ -7531,22 +7693,7 @@
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
-	/*
-	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
-	 * DDI E. So just check whether this pipe is wired to DDI E and whether
-	 * the PCH transcoder is on.
-	 */
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
-	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
-	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
-		pipe_config->has_pch_encoder = true;
-
-		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
-		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
-					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
-		ironlake_get_fdi_m_n_config(crtc, pipe_config);
-	}
+	haswell_get_ddi_port_state(crtc, pipe_config);
 
 	intel_get_pipe_timings(crtc, pipe_config);
 
@@ -7991,8 +8138,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int x = intel_crtc->cursor_x;
-	int y = intel_crtc->cursor_y;
+	int x = crtc->cursor_x;
+	int y = crtc->cursor_y;
 	u32 base = 0, pos = 0;
 
 	if (on)
@@ -8036,21 +8183,27 @@
 	intel_crtc->cursor_base = base;
 }
 
-static int intel_crtc_cursor_set(struct drm_crtc *crtc,
-				 struct drm_file *file,
-				 uint32_t handle,
-				 uint32_t width, uint32_t height)
+/*
+ * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
+ *
+ * Note that the object's reference will be consumed if the update fails.  If
+ * the update succeeds, the reference of the old object (if any) will be
+ * consumed.
+ */
+static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
+				     struct drm_i915_gem_object *obj,
+				     uint32_t width, uint32_t height)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_gem_object *obj;
+	enum pipe pipe = intel_crtc->pipe;
 	unsigned old_width;
 	uint32_t addr;
 	int ret;
 
 	/* if we want to turn off the cursor ignore width and height */
-	if (!handle) {
+	if (!obj) {
 		DRM_DEBUG_KMS("cursor off\n");
 		addr = 0;
 		obj = NULL;
@@ -8066,12 +8219,8 @@
 		return -EINVAL;
 	}
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
-	if (&obj->base == NULL)
-		return -ENOENT;
-
 	if (obj->base.size < width * height * 4) {
-		DRM_DEBUG_KMS("buffer is to small\n");
+		DRM_DEBUG_KMS("buffer is too small\n");
 		ret = -ENOMEM;
 		goto fail;
 	}
@@ -8126,9 +8275,10 @@
 	if (intel_crtc->cursor_bo) {
 		if (!INTEL_INFO(dev)->cursor_needs_physical)
 			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
-		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
 
+	i915_gem_track_fb(intel_crtc->cursor_bo, obj,
+			  INTEL_FRONTBUFFER_CURSOR(pipe));
 	mutex_unlock(&dev->struct_mutex);
 
 	old_width = intel_crtc->cursor_width;
@@ -8144,6 +8294,8 @@
 		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 	}
 
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+
 	return 0;
 fail_unpin:
 	i915_gem_object_unpin_from_display_plane(obj);
@@ -8154,19 +8306,6 @@
 	return ret;
 }
 
-static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
-	intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
-
-	if (intel_crtc->active)
-		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
-	return 0;
-}
-
 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 				 u16 *blue, uint32_t start, uint32_t size)
 {
@@ -8242,7 +8381,7 @@
 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
 {
 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
-	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+	return PAGE_ALIGN(pitch * mode->vdisplay);
 }
 
 static struct drm_framebuffer *
@@ -8667,16 +8806,14 @@
 	return mode;
 }
 
-static void intel_increase_pllclock(struct drm_crtc *crtc)
+static void intel_increase_pllclock(struct drm_device *dev,
+				    enum pipe pipe)
 {
-	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
 	int dpll_reg = DPLL(pipe);
 	int dpll;
 
-	if (HAS_PCH_SPLIT(dev))
+	if (!HAS_GMCH_DISPLAY(dev))
 		return;
 
 	if (!dev_priv->lvds_downclock_avail)
@@ -8704,7 +8841,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (!HAS_GMCH_DISPLAY(dev))
 		return;
 
 	if (!dev_priv->lvds_downclock_avail)
@@ -8773,28 +8910,179 @@
 	intel_runtime_pm_put(dev_priv);
 }
 
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-			struct intel_engine_cs *ring)
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used, for example, to keep the update rate at the nominal refresh rate with
+ * DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+			       unsigned frontbuffer_bits,
+			       struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_crtc *crtc;
+	enum pipe pipe;
 
 	if (!i915.powersave)
 		return;
 
-	for_each_crtc(dev, crtc) {
-		if (!crtc->primary->fb)
+	for_each_pipe(pipe) {
+		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
 			continue;
 
-		if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
-			continue;
-
-		intel_increase_pllclock(crtc);
+		intel_increase_pllclock(dev, pipe);
 		if (ring && intel_fbc_enabled(dev))
 			ring->fbc_dirty = true;
 	}
 }
 
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL, any subsequent flushing will be
+ * delayed until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			     struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!obj->frontbuffer_bits)
+		return;
+
+	if (ring) {
+		mutex_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits
+			|= obj->frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits
+			&= ~obj->frontbuffer_bits;
+		mutex_unlock(&dev_priv->fb_tracking.lock);
+	}
+
+	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+			     unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Delay flushing when rings are still busy. */
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+	intel_edp_psr_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			bool retire)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned frontbuffer_bits;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!obj->frontbuffer_bits)
+		return;
+
+	frontbuffer_bits = obj->frontbuffer_bits;
+
+	if (retire) {
+		mutex_lock(&dev_priv->fb_tracking.lock);
+		/* Filter out new bits since rendering started. */
+		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+		mutex_unlock(&dev_priv->fb_tracking.lock);
+	}
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+				    unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	dev_priv->fb_tracking.flip_bits
+		|= frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+				     unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	/* Mask any cancelled flips. */
+	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
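+
+/*
+ * Illustrative sketch (not part of the original patch): the tracking
+ * protocol the helpers above implement, for a page flip and for direct
+ * frontbuffer rendering respectively.
+ *
+ *	intel_frontbuffer_flip_prepare(dev, bits);	// flip scheduled
+ *	...						// flip latched by hw
+ *	intel_frontbuffer_flip_complete(dev, bits);	// from vblank worker
+ *
+ *	intel_fb_obj_invalidate(obj, ring);	// rendering starts
+ *	...					// rendering completes
+ *	intel_fb_obj_flush(obj, true);		// retire path unblocks flush
+ */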
+
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -8812,8 +9100,6 @@
 		kfree(work);
 	}
 
-	intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
-
 	drm_crtc_cleanup(crtc);
 
 	kfree(intel_crtc);
@@ -8824,6 +9110,7 @@
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
 	struct drm_device *dev = work->crtc->dev;
+	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
 
 	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb_obj);
@@ -8833,6 +9120,8 @@
 	intel_update_fbc(dev);
 	mutex_unlock(&dev->struct_mutex);
 
+	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
 	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
 	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
 
@@ -9202,6 +9491,150 @@
 	return 0;
 }
 
+static bool use_mmio_flip(struct intel_engine_cs *ring,
+			  struct drm_i915_gem_object *obj)
+{
+	/*
+	 * MMIO flips are not used on older platforms: the lack of a flip
+	 * done interrupt forces us to use CS flips there. Those platforms
+	 * derive flip completion using some clever tricks involving the
+	 * flip_pending status bits and vblank irqs, and using MMIO flips
+	 * would disrupt that mechanism.
+	 */
+
+	if (ring == NULL)
+		return true;
+
+	if (INTEL_INFO(ring->dev)->gen < 5)
+		return false;
+
+	if (i915.use_mmio_flip < 0)
+		return false;
+	else if (i915.use_mmio_flip > 0)
+		return true;
+	else
+		return ring != obj->ring;
+}
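+
+/*
+ * Decision table for use_mmio_flip() above (illustrative summary, not part
+ * of the original patch); i915.use_mmio_flip acts as a manual override:
+ *
+ *	ring == NULL		-> MMIO flip (nothing to wait for)
+ *	gen < 5			-> CS flip (no flip done interrupt)
+ *	i915.use_mmio_flip < 0	-> CS flip (forced off)
+ *	i915.use_mmio_flip > 0	-> MMIO flip (forced on)
+ *	otherwise		-> MMIO flip only when the target ring
+ *				   differs from the object's current ring
+ */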
+
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_framebuffer *intel_fb =
+		to_intel_framebuffer(intel_crtc->base.primary->fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	u32 dspcntr;
+	u32 reg;
+
+	intel_mark_page_flip_active(intel_crtc);
+
+	reg = DSPCNTR(intel_crtc->plane);
+	dspcntr = I915_READ(reg);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (obj->tiling_mode != I915_TILING_NONE)
+			dspcntr |= DISPPLANE_TILED;
+		else
+			dspcntr &= ~DISPPLANE_TILED;
+	}
+	I915_WRITE(reg, dspcntr);
+
+	I915_WRITE(DSPSURF(intel_crtc->plane),
+		   intel_crtc->unpin_work->gtt_offset);
+	POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
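+/*
+ * Check whether a flip needs to wait for outstanding rendering on @obj.
+ * Returns 1 if the flip must be postponed until the object's last write
+ * seqno has passed (a ring irq reference is held and later dropped by
+ * intel_notify_mmio_flip()), 0 if the flip can be executed immediately,
+ * or a negative error code.
+ */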
+static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+	struct intel_engine_cs *ring;
+	int ret;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	if (!obj->last_write_seqno)
+		return 0;
+
+	ring = obj->ring;
+
+	if (i915_seqno_passed(ring->get_seqno(ring, true),
+			      obj->last_write_seqno))
+		return 0;
+
+	ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(!ring->irq_get(ring)))
+		return 0;
+
+	return 1;
+}
+
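+/*
+ * Called from the ring interrupt handler: execute the pending MMIO flip on
+ * every crtc waiting on this ring whose target seqno has passed, dropping
+ * the irq reference taken in intel_postpone_flip().
+ */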
+void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct intel_crtc *intel_crtc;
+	unsigned long irq_flags;
+	u32 seqno;
+
+	seqno = ring->get_seqno(ring, false);
+
+	spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+	for_each_intel_crtc(ring->dev, intel_crtc) {
+		struct intel_mmio_flip *mmio_flip;
+
+		mmio_flip = &intel_crtc->mmio_flip;
+		if (mmio_flip->seqno == 0)
+			continue;
+
+		if (ring->id != mmio_flip->ring_id)
+			continue;
+
+		if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
+			intel_do_mmio_flip(intel_crtc);
+			mmio_flip->seqno = 0;
+			ring->irq_put(ring);
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+}
+
+static int intel_queue_mmio_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj,
+				 struct intel_engine_cs *ring,
+				 uint32_t flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long irq_flags;
+	int ret;
+
+	if (WARN_ON(intel_crtc->mmio_flip.seqno))
+		return -EBUSY;
+
+	ret = intel_postpone_flip(obj);
+	if (ret < 0)
+		return ret;
+	if (ret == 0) {
+		intel_do_mmio_flip(intel_crtc);
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+	intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+	intel_crtc->mmio_flip.ring_id = obj->ring->id;
+	spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+
+	/*
+	 * Double-check to catch the case where the irq fired before the
+	 * mmio flip data was ready.
+	 */
+	intel_notify_mmio_flip(obj->ring);
+	return 0;
+}
+
 static int intel_default_queue_flip(struct drm_device *dev,
 				    struct drm_crtc *crtc,
 				    struct drm_framebuffer *fb,
@@ -9220,13 +9653,22 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
 	struct intel_unpin_work *work;
 	struct intel_engine_cs *ring;
 	unsigned long flags;
 	int ret;
 
+	/*
+	 * drm_mode_page_flip_ioctl() should already catch this, but double
+	 * check to be safe.  In the future we may enable pageflipping from
+	 * a disabled primary plane.
+	 */
+	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
+		return -EBUSY;
+
 	/* Can't change pixel format via MI display flips. */
 	if (fb->pixel_format != crtc->primary->fb->pixel_format)
 		return -EINVAL;
@@ -9249,7 +9691,7 @@
 
 	work->event = event;
 	work->crtc = crtc;
-	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+	work->old_fb_obj = intel_fb_obj(old_fb);
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
 	ret = drm_crtc_vblank_get(crtc);
@@ -9290,10 +9732,15 @@
 	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
-		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
 
 	if (IS_VALLEYVIEW(dev)) {
 		ring = &dev_priv->ring[BCS];
+		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+			/* vlv: DISPLAY_FLIP fails to change tiling */
+			ring = NULL;
+	} else if (IS_IVYBRIDGE(dev)) {
+		ring = &dev_priv->ring[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
 		ring = obj->ring;
 		if (ring == NULL || ring->id != RCS)
@@ -9309,12 +9756,20 @@
 	work->gtt_offset =
 		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
 
-	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
+	if (use_mmio_flip(ring, obj))
+		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+					    page_flip_flags);
+	else
+		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+				page_flip_flags);
 	if (ret)
 		goto cleanup_unpin;
 
+	i915_gem_track_fb(work->old_fb_obj, obj,
+			  INTEL_FRONTBUFFER_PRIMARY(pipe));
+
 	intel_disable_fbc(dev);
-	intel_mark_fb_busy(obj, NULL);
+	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 	mutex_unlock(&dev->struct_mutex);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
@@ -9344,7 +9799,7 @@
 		intel_crtc_wait_for_pending_flips(crtc);
 		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
 		if (ret == 0 && event)
-			drm_send_vblank_event(dev, intel_crtc->pipe, event);
+			drm_send_vblank_event(dev, pipe, event);
 	}
 	return ret;
 }
@@ -10017,11 +10472,14 @@
 
 	PIPE_CONF_CHECK_I(double_wide);
 
+	PIPE_CONF_CHECK_X(ddi_pll_sel);
+
 	PIPE_CONF_CHECK_I(shared_dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
 
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10083,6 +10541,14 @@
 			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
 				active = true;
 		}
+		/*
+		 * For MST connectors, the connector goes away on unplug but
+		 * the encoder stays connected to a crtc until a modeset
+		 * happens in response to the hotplug.
+		 */
+		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
+			continue;
+
 		WARN(!!encoder->base.crtc != enabled,
 		     "encoder's enabled state mismatch "
 		     "(expected %i, found %i)\n",
@@ -10378,20 +10844,23 @@
 	 * on the DPLL.
 	 */
 	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
-		struct drm_framebuffer *old_fb;
+		struct drm_framebuffer *old_fb = crtc->primary->fb;
+		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
+		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
 		mutex_lock(&dev->struct_mutex);
 		ret = intel_pin_and_fence_fb_obj(dev,
-						 to_intel_framebuffer(fb)->obj,
+						 obj,
 						 NULL);
 		if (ret != 0) {
 			DRM_ERROR("pin & fence failed\n");
 			mutex_unlock(&dev->struct_mutex);
 			goto done;
 		}
-		old_fb = crtc->primary->fb;
 		if (old_fb)
-			intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+			intel_unpin_fb_obj(old_obj);
+		i915_gem_track_fb(old_obj, obj,
+				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
 		mutex_unlock(&dev->struct_mutex);
 
 		crtc->primary->fb = fb;
@@ -10563,12 +11032,17 @@
 	if (is_crtc_connector_off(set)) {
 		config->mode_changed = true;
 	} else if (set->crtc->primary->fb != set->fb) {
-		/* If we have no fb then treat it as a full mode set */
+		/*
+		 * If we have no fb, we can only flip as long as the crtc is
+		 * active, otherwise we need a full mode set.  The crtc may
+		 * be active if we've only disabled the primary plane, or
+		 * in fastboot situations.
+		 */
 		if (set->crtc->primary->fb == NULL) {
 			struct intel_crtc *intel_crtc =
 				to_intel_crtc(set->crtc);
 
-			if (intel_crtc->active && i915.fastboot) {
+			if (intel_crtc->active) {
 				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
 				config->fb_changed = true;
 			} else {
@@ -10620,7 +11094,7 @@
 		 * for them. */
 		for (ro = 0; ro < set->num_connectors; ro++) {
 			if (set->connectors[ro] == &connector->base) {
-				connector->new_encoder = connector->encoder;
+				connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
 				break;
 			}
 		}
@@ -10666,7 +11140,7 @@
 					 new_crtc)) {
 			return -EINVAL;
 		}
-		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+		connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
 
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
 			connector->base.base.id,
@@ -10700,7 +11174,12 @@
 		}
 	}
 	/* Now we've also updated encoder->new_crtc for all encoders. */
-
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->new_encoder)
+			if (connector->new_encoder != connector->encoder)
+				connector->encoder = connector->new_encoder;
+	}
 	for_each_intel_crtc(dev, crtc) {
 		crtc->new_enabled = false;
 
@@ -10806,10 +11285,24 @@
 		ret = intel_set_mode(set->crtc, set->mode,
 				     set->x, set->y, set->fb);
 	} else if (config->fb_changed) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+
 		intel_crtc_wait_for_pending_flips(set->crtc);
 
 		ret = intel_pipe_set_base(set->crtc,
 					  set->x, set->y, set->fb);
+
+		/*
+		 * We need to make sure the primary plane is re-enabled if it
+		 * has previously been turned off.
+		 */
+		if (!intel_crtc->primary_enabled && ret == 0) {
+			WARN_ON(!intel_crtc->active);
+			intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+						      intel_crtc->pipe);
+		}
+
 		/*
 		 * In the fastboot case this may be our only check of the
 		 * state after boot.  It would be better to only do it on
@@ -10850,26 +11343,21 @@
 }
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
-	.cursor_set = intel_crtc_cursor_set,
-	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
 	.set_config = intel_crtc_set_config,
 	.destroy = intel_crtc_destroy,
 	.page_flip = intel_crtc_page_flip,
 };
 
-static void intel_cpu_pll_init(struct drm_device *dev)
-{
-	if (HAS_DDI(dev))
-		intel_ddi_pll_init(dev);
-}
-
 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 				      struct intel_shared_dpll *pll,
 				      struct intel_dpll_hw_state *hw_state)
 {
 	uint32_t val;
 
+	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
 	val = I915_READ(PCH_DPLL(pll->id));
 	hw_state->dpll = val;
 	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
@@ -10951,7 +11439,9 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+	if (HAS_DDI(dev))
+		intel_ddi_pll_init(dev);
+	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 		ibx_pch_dpll_init(dev);
 	else
 		dev_priv->num_shared_dpll = 0;
@@ -10959,17 +11449,328 @@
 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
 }
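+
+/*
+ * Universal plane support: the primary and cursor planes are exposed as
+ * first-class drm_plane objects. The handlers below route plane updates
+ * through the existing set_base and cursor paths rather than programming
+ * the hardware directly.
+ */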
 
+static int
+intel_primary_plane_disable(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct intel_crtc *intel_crtc;
+
+	if (!plane->fb)
+		return 0;
+
+	BUG_ON(!plane->crtc);
+
+	intel_crtc = to_intel_crtc(plane->crtc);
+
+	/*
+	 * Even though we checked plane->fb above, it's still possible that
+	 * the primary plane has been implicitly disabled because the crtc
+	 * coordinates given weren't visible, or because we detected
+	 * that it was 100% covered by a sprite plane.  Or, the CRTC may be
+	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
+	 * In any of these cases, we need to unpin the FB and let the fb
+	 * pointer get updated, but otherwise we don't need to touch the
+	 * hardware.
+	 */
+	if (!intel_crtc->primary_enabled)
+		goto disable_unpin;
+
+	intel_crtc_wait_for_pending_flips(plane->crtc);
+	intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
+				       intel_plane->pipe);
+disable_unpin:
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
+			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
+	mutex_unlock(&dev->struct_mutex);
+	plane->fb = NULL;
+
+	return 0;
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+			     unsigned int crtc_w, unsigned int crtc_h,
+			     uint32_t src_x, uint32_t src_y,
+			     uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+	struct drm_rect dest = {
+		/* integer pixels */
+		.x1 = crtc_x,
+		.y1 = crtc_y,
+		.x2 = crtc_x + crtc_w,
+		.y2 = crtc_y + crtc_h,
+	};
+	struct drm_rect src = {
+		/* 16.16 fixed point */
+		.x1 = src_x,
+		.y1 = src_y,
+		.x2 = src_x + src_w,
+		.y2 = src_y + src_h,
+	};
+	const struct drm_rect clip = {
+		/* integer pixels */
+		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+	};
+	bool visible;
+	int ret;
+
+	ret = drm_plane_helper_check_update(plane, crtc, fb,
+					    &src, &dest, &clip,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    false, true, &visible);
+
+	if (ret)
+		return ret;
+
+	/*
+	 * If the CRTC isn't enabled, we're just pinning the framebuffer,
+	 * updating the fb pointer, and returning without touching the
+	 * hardware.  This allows us to later do a drmModeSetCrtc with fb=-1 to
+	 * turn on the display with all planes setup as desired.
+	 */
+	if (!crtc->enabled) {
+		mutex_lock(&dev->struct_mutex);
+
+		/*
+		 * If we already called setplane while the crtc was disabled,
+		 * we may have an fb pinned; unpin it.
+		 */
+		if (plane->fb)
+			intel_unpin_fb_obj(old_obj);
+
+		i915_gem_track_fb(old_obj, obj,
+				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+		/* Pin and return without programming hardware */
+		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+		mutex_unlock(&dev->struct_mutex);
+
+		return ret;
+	}
+
+	intel_crtc_wait_for_pending_flips(crtc);
+
+	/*
+	 * If clipping results in a non-visible primary plane, we'll disable
+	 * the primary plane.  Note that this is a bit different than what
+	 * happens if userspace explicitly disables the plane by passing fb=0
+	 * because plane->fb still gets set and pinned.
+	 */
+	if (!visible) {
+		mutex_lock(&dev->struct_mutex);
+
+		/*
+		 * Try to pin the new fb first so that we can bail out if we
+		 * fail.
+		 */
+		if (plane->fb != fb) {
+			ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+			if (ret) {
+				mutex_unlock(&dev->struct_mutex);
+				return ret;
+			}
+		}
+
+		i915_gem_track_fb(old_obj, obj,
+				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+		if (intel_crtc->primary_enabled)
+			intel_disable_primary_hw_plane(dev_priv,
+						       intel_plane->plane,
+						       intel_plane->pipe);
+
+		if (plane->fb != fb)
+			if (plane->fb)
+				intel_unpin_fb_obj(old_obj);
+
+		mutex_unlock(&dev->struct_mutex);
+
+		return 0;
+	}
+
+	ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
+	if (ret)
+		return ret;
+
+	if (!intel_crtc->primary_enabled)
+		intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+					      intel_crtc->pipe);
+
+	return 0;
+}
+
+/* Common destruction function for both primary and cursor planes */
+static void intel_plane_destroy(struct drm_plane *plane)
+{
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	drm_plane_cleanup(plane);
+	kfree(intel_plane);
+}
+
+static const struct drm_plane_funcs intel_primary_plane_funcs = {
+	.update_plane = intel_primary_plane_setplane,
+	.disable_plane = intel_primary_plane_disable,
+	.destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
+						    int pipe)
+{
+	struct intel_plane *primary;
+	const uint32_t *intel_primary_formats;
+	int num_formats;
+
+	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+	if (primary == NULL)
+		return NULL;
+
+	primary->can_scale = false;
+	primary->max_downscale = 1;
+	primary->pipe = pipe;
+	primary->plane = pipe;
+	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+		primary->plane = !pipe;
+
+	if (INTEL_INFO(dev)->gen <= 3) {
+		intel_primary_formats = intel_primary_formats_gen2;
+		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
+	} else {
+		intel_primary_formats = intel_primary_formats_gen4;
+		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
+	}
+
+	drm_universal_plane_init(dev, &primary->base, 0,
+				 &intel_primary_plane_funcs,
+				 intel_primary_formats, num_formats,
+				 DRM_PLANE_TYPE_PRIMARY);
+	return &primary->base;
+}
+
+static int
+intel_cursor_plane_disable(struct drm_plane *plane)
+{
+	if (!plane->fb)
+		return 0;
+
+	BUG_ON(!plane->crtc);
+
+	return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
+}
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+			  unsigned int crtc_w, unsigned int crtc_h,
+			  uint32_t src_x, uint32_t src_y,
+			  uint32_t src_w, uint32_t src_h)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_rect dest = {
+		/* integer pixels */
+		.x1 = crtc_x,
+		.y1 = crtc_y,
+		.x2 = crtc_x + crtc_w,
+		.y2 = crtc_y + crtc_h,
+	};
+	struct drm_rect src = {
+		/* 16.16 fixed point */
+		.x1 = src_x,
+		.y1 = src_y,
+		.x2 = src_x + src_w,
+		.y2 = src_y + src_h,
+	};
+	const struct drm_rect clip = {
+		/* integer pixels */
+		.x2 = intel_crtc->config.pipe_src_w,
+		.y2 = intel_crtc->config.pipe_src_h,
+	};
+	bool visible;
+	int ret;
+
+	ret = drm_plane_helper_check_update(plane, crtc, fb,
+					    &src, &dest, &clip,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    true, true, &visible);
+	if (ret)
+		return ret;
+
+	crtc->cursor_x = crtc_x;
+	crtc->cursor_y = crtc_y;
+	if (fb != crtc->cursor->fb) {
+		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
+	} else {
+		intel_crtc_update_cursor(crtc, visible);
+		return 0;
+	}
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+	.update_plane = intel_cursor_plane_update,
+	.disable_plane = intel_cursor_plane_disable,
+	.destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
+						   int pipe)
+{
+	struct intel_plane *cursor;
+
+	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
+	if (cursor == NULL)
+		return NULL;
+
+	cursor->can_scale = false;
+	cursor->max_downscale = 1;
+	cursor->pipe = pipe;
+	cursor->plane = pipe;
+
+	drm_universal_plane_init(dev, &cursor->base, 0,
+				 &intel_cursor_plane_funcs,
+				 intel_cursor_formats,
+				 ARRAY_SIZE(intel_cursor_formats),
+				 DRM_PLANE_TYPE_CURSOR);
+	return &cursor->base;
+}
+
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc;
-	int i;
+	struct drm_plane *primary = NULL;
+	struct drm_plane *cursor = NULL;
+	int i, ret;
 
 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
 	if (intel_crtc == NULL)
 		return;
 
-	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+	primary = intel_primary_plane_create(dev, pipe);
+	if (!primary)
+		goto fail;
+
+	cursor = intel_cursor_plane_create(dev, pipe);
+	if (!cursor)
+		goto fail;
+
+	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
+					cursor, &intel_crtc_funcs);
+	if (ret)
+		goto fail;
 
 	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
 	for (i = 0; i < 256; i++) {
@@ -10980,7 +11781,7 @@
 
 	/*
 	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
-	 * is hooked to plane B. Hence we want plane A feeding pipe B.
+	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
 	 */
 	intel_crtc->pipe = pipe;
 	intel_crtc->plane = pipe;
@@ -11002,6 +11803,14 @@
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+	return;
+
+fail:
+	if (primary)
+		drm_plane_cleanup(primary);
+	if (cursor)
+		drm_plane_cleanup(cursor);
+	kfree(intel_crtc);
 }
 
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -11021,21 +11830,20 @@
 				struct drm_file *file)
 {
 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
-	struct drm_mode_object *drmmode_obj;
+	struct drm_crtc *drmmode_crtc;
 	struct intel_crtc *crtc;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
-			DRM_MODE_OBJECT_CRTC);
+	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
 
-	if (!drmmode_obj) {
+	if (!drmmode_crtc) {
 		DRM_ERROR("no such CRTC id\n");
 		return -ENOENT;
 	}
 
-	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+	crtc = to_intel_crtc(drmmode_crtc);
 	pipe_from_crtc_id->pipe = crtc->pipe;
 
 	return 0;
@@ -11236,6 +12044,8 @@
 	if (SUPPORTS_TV(dev))
 		intel_tv_init(dev);
 
+	intel_edp_psr_init(dev);
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 		encoder->base.possible_crtcs = encoder->crtc_mask;
 		encoder->base.possible_clones =
@@ -11249,11 +12059,14 @@
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
+	struct drm_device *dev = fb->dev;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
 	drm_framebuffer_cleanup(fb);
+	mutex_lock(&dev->struct_mutex);
 	WARN_ON(!intel_fb->obj->framebuffer_references--);
-	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+	drm_gem_object_unreference(&intel_fb->obj->base);
+	mutex_unlock(&dev->struct_mutex);
 	kfree(intel_fb);
 }
 
@@ -11438,7 +12251,7 @@
 		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.off = haswell_crtc_off;
+		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			ironlake_update_primary_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
@@ -11722,6 +12535,9 @@
 {
 	intel_prepare_ddi(dev);
 
+	if (IS_VALLEYVIEW(dev))
+		vlv_update_cdclk(dev);
+
 	intel_init_clock_gating(dev);
 
 	intel_reset_dpio(dev);
@@ -11798,7 +12614,6 @@
 	intel_init_dpio(dev);
 	intel_reset_dpio(dev);
 
-	intel_cpu_pll_init(dev);
 	intel_shared_dpll_init(dev);
 
 	/* Just disable it once at startup */
@@ -12024,6 +12839,8 @@
 				      encoder->base.base.id,
 				      encoder->base.name);
 			encoder->disable(encoder);
+			if (encoder->post_disable)
+				encoder->post_disable(encoder);
 		}
 		encoder->base.crtc = NULL;
 		encoder->connectors_active = false;
@@ -12108,10 +12925,6 @@
 			      crtc->active ? "enabled" : "disabled");
 	}
 
-	/* FIXME: Smash this into the new shared dpll infrastructure. */
-	if (HAS_DDI(dev))
-		intel_ddi_setup_hw_pll_state(dev);
-
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
@@ -12125,6 +12938,9 @@
 
 		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
 			      pll->name, pll->refcount, pll->on);
+
+		if (pll->refcount)
+			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -12242,7 +13058,7 @@
 void intel_modeset_gem_init(struct drm_device *dev)
 {
 	struct drm_crtc *c;
-	struct intel_framebuffer *fb;
+	struct drm_i915_gem_object *obj;
 
 	mutex_lock(&dev->struct_mutex);
 	intel_init_gt_powersave(dev);
@@ -12259,11 +13075,11 @@
 	 */
 	mutex_lock(&dev->struct_mutex);
 	for_each_crtc(dev, c) {
-		if (!c->primary->fb)
+		obj = intel_fb_obj(c->primary->fb);
+		if (obj == NULL)
 			continue;
 
-		fb = to_intel_framebuffer(c->primary->fb);
-		if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
+		if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
 				  to_intel_crtc(c)->pipe);
 			drm_framebuffer_unreference(c->primary->fb);
@@ -12278,13 +13094,12 @@
 	struct drm_connector *connector = &intel_connector->base;
 
 	intel_panel_destroy_backlight(connector);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
 	struct drm_connector *connector;
 
 	/*
@@ -12294,6 +13109,8 @@
 	 */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
+	dev_priv->pm._irqs_disabled = true;
+
 	/*
 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
 	 * poll handlers. Hence disable polling after hpd handling is shut down.
@@ -12304,14 +13121,6 @@
 
 	intel_unregister_dsm_handler();
 
-	for_each_crtc(dev, crtc) {
-		/* Skip inactive CRTCs */
-		if (!crtc->primary->fb)
-			continue;
-
-		intel_increase_pllclock(crtc);
-	}
-
 	intel_disable_fbc(dev);
 
 	intel_disable_gt_powersave(dev);
@@ -12479,7 +13288,7 @@
 
 		error->pipe[i].source = I915_READ(PIPESRC(i));
 
-		if (!HAS_PCH_SPLIT(dev))
+		if (HAS_GMCH_DISPLAY(dev))
 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8a1a4fb..ea6ff71 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -114,7 +114,7 @@
 static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 
-static int
+int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -773,12 +773,29 @@
 {
 	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
 
-	sysfs_remove_link(&intel_connector->base.kdev->kobj,
-			  intel_dp->aux.ddc.dev.kobj.name);
+	if (!intel_connector->mst_port)
+		sysfs_remove_link(&intel_connector->base.kdev->kobj,
+				  intel_dp->aux.ddc.dev.kobj.name);
 	intel_connector_unregister(intel_connector);
 }
 
 static void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+{
+	switch (link_bw) {
+	case DP_LINK_BW_1_62:
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+		break;
+	case DP_LINK_BW_2_7:
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+		break;
+	case DP_LINK_BW_5_4:
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+		break;
+	}
+}
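+
+/*
+ * Note: the LCPLL frequencies selected above are half the corresponding DP
+ * link bit rate: 810 MHz for the 1.62 GHz link, 1350 MHz for 2.7 GHz and
+ * 2700 MHz for 5.4 GHz.
+ */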
+
+static void
 intel_dp_set_clock(struct intel_encoder *encoder,
 		   struct intel_crtc_config *pipe_config, int link_bw)
 {
@@ -789,8 +806,6 @@
 	if (IS_G4X(dev)) {
 		divisor = gen4_dpll;
 		count = ARRAY_SIZE(gen4_dpll);
-	} else if (IS_HASWELL(dev)) {
-		/* Haswell has special-purpose DP DDI clocks. */
 	} else if (HAS_PCH_SPLIT(dev)) {
 		divisor = pch_dpll;
 		count = ARRAY_SIZE(pch_dpll);
@@ -961,7 +976,10 @@
 				&pipe_config->dp_m2_n2);
 	}
 
-	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+	if (HAS_DDI(dev))
+		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+	else
+		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
 
 	return true;
 }
@@ -1349,8 +1367,6 @@
 
 	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	edp_wait_backlight_off(intel_dp);
-
 	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
 	pp = ironlake_get_pp_control(intel_dp);
@@ -1386,6 +1402,9 @@
 		return;
 
 	DRM_DEBUG_KMS("\n");
+
+	intel_panel_enable_backlight(intel_dp->attached_connector);
+
 	/*
 	 * If we enable the backlight right away following a panel power
 	 * on, we may see slight flicker as the panel syncs with the eDP
@@ -1400,8 +1419,6 @@
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
-
-	intel_panel_enable_backlight(intel_dp->attached_connector);
 }
 
 void intel_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1414,8 +1431,6 @@
 	if (!is_edp(intel_dp))
 		return;
 
-	intel_panel_disable_backlight(intel_dp->attached_connector);
-
 	DRM_DEBUG_KMS("\n");
 	pp = ironlake_get_pp_control(intel_dp);
 	pp &= ~EDP_BLC_ENABLE;
@@ -1425,6 +1440,10 @@
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 	intel_dp->last_backlight_off = jiffies;
+
+	edp_wait_backlight_off(intel_dp);
+
+	intel_panel_disable_backlight(intel_dp->attached_connector);
 }
 
 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1646,11 +1665,9 @@
 	}
 }
 
-static bool is_edp_psr(struct drm_device *dev)
+static bool is_edp_psr(struct intel_dp *intel_dp)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	return dev_priv->psr.sink_support;
+	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
 }
 
 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
@@ -1698,9 +1715,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct edp_vsc_psr psr_vsc;
 
-	if (intel_dp->psr_setup_done)
-		return;
-
 	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
 	memset(&psr_vsc, 0, sizeof(psr_vsc));
 	psr_vsc.sdp_header.HB0 = 0;
@@ -1712,22 +1726,25 @@
 	/* Avoid continuous PSR exit by masking memup and hpd */
 	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
 		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
-	intel_dp->psr_setup_done = true;
 }
 
 static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t aux_clock_divider;
 	int precharge = 0x3;
 	int msg_size = 5;       /* Header(4) + Message(1) */
+	bool only_standby = false;
 
 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
+	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+		only_standby = true;
+
 	/* Enable PSR in sink */
-	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
 				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
 	else
@@ -1746,18 +1763,24 @@
 
 static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t max_sleep_time = 0x1f;
 	uint32_t idle_frames = 1;
 	uint32_t val = 0x0;
 	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+	bool only_standby = false;
 
-	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+		only_standby = true;
+
+	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
 		val |= EDP_PSR_LINK_STANDBY;
 		val |= EDP_PSR_TP2_TP3_TIME_0us;
 		val |= EDP_PSR_TP1_TIME_0us;
 		val |= EDP_PSR_SKIP_AUX_EXIT;
+		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
 	} else
 		val |= EDP_PSR_LINK_DISABLE;
 
@@ -1775,18 +1798,15 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dig_port->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
-	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+
+	lockdep_assert_held(&dev_priv->psr.lock);
+	lockdep_assert_held(&dev->struct_mutex);
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	dev_priv->psr.source_ok = false;
 
-	if (!HAS_PSR(dev)) {
-		DRM_DEBUG_KMS("PSR not supported on this platform\n");
-		return false;
-	}
-
-	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
-	    (dig_port->port != PORT_A)) {
+	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
 		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
 		return false;
 	}
@@ -1796,29 +1816,9 @@
 		return false;
 	}
 
-	crtc = dig_port->base.base.crtc;
-	if (crtc == NULL) {
-		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		return false;
-	}
-
-	intel_crtc = to_intel_crtc(crtc);
-	if (!intel_crtc_active(crtc)) {
-		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		return false;
-	}
-
-	obj = to_intel_framebuffer(crtc->primary->fb)->obj;
-	if (obj->tiling_mode != I915_TILING_X ||
-	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-		return false;
-	}
-
-	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
-		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-		return false;
-	}
+	/* The limitations below aren't valid for Broadwell */
+	if (IS_BROADWELL(dev))
+		goto out;
 
 	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
 	    S3D_ENABLE) {
@@ -1831,35 +1831,60 @@
 		return false;
 	}
 
+ out:
 	dev_priv->psr.source_ok = true;
 	return true;
 }
 
 static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!intel_edp_psr_match_conditions(intel_dp) ||
-	    intel_edp_is_psr_enabled(dev))
-		return;
-
-	/* Setup PSR once */
-	intel_edp_psr_setup(intel_dp);
+	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	WARN_ON(dev_priv->psr.active);
+	lockdep_assert_held(&dev_priv->psr.lock);
 
 	/* Enable PSR on the panel */
 	intel_edp_psr_enable_sink(intel_dp);
 
 	/* Enable PSR on the host */
 	intel_edp_psr_enable_source(intel_dp);
+
+	dev_priv->psr.active = true;
 }
 
 void intel_edp_psr_enable(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (intel_edp_psr_match_conditions(intel_dp) &&
-	    !intel_edp_is_psr_enabled(dev))
-		intel_edp_psr_do_enable(intel_dp);
+	if (!HAS_PSR(dev)) {
+		DRM_DEBUG_KMS("PSR not supported on this platform\n");
+		return;
+	}
+
+	if (!is_edp_psr(intel_dp)) {
+		DRM_DEBUG_KMS("PSR not supported by this panel\n");
+		return;
+	}
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (dev_priv->psr.enabled) {
+		DRM_DEBUG_KMS("PSR already in use\n");
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	dev_priv->psr.busy_frontbuffer_bits = 0;
+
+	/* Setup PSR once */
+	intel_edp_psr_setup(intel_dp);
+
+	if (intel_edp_psr_match_conditions(intel_dp))
+		dev_priv->psr.enabled = intel_dp;
+	mutex_unlock(&dev_priv->psr.lock);
 }
 
 void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1867,36 +1892,136 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!intel_edp_is_psr_enabled(dev))
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
 		return;
+	}
 
-	I915_WRITE(EDP_PSR_CTL(dev),
-		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+	if (dev_priv->psr.active) {
+		I915_WRITE(EDP_PSR_CTL(dev),
+			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
-	/* Wait till PSR is idle */
-	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
-		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
-		DRM_ERROR("Timed out waiting for PSR Idle State\n");
+		/* Wait till PSR is idle */
+		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+			DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+		dev_priv->psr.active = false;
+	} else {
+		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	}
+
+	dev_priv->psr.enabled = NULL;
+	mutex_unlock(&dev_priv->psr.lock);
+
+	cancel_delayed_work_sync(&dev_priv->psr.work);
 }
 
-void intel_edp_psr_update(struct drm_device *dev)
+static void intel_edp_psr_work(struct work_struct *work)
 {
-	struct intel_encoder *encoder;
-	struct intel_dp *intel_dp = NULL;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), psr.work.work);
+	struct intel_dp *intel_dp;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
-		if (encoder->type == INTEL_OUTPUT_EDP) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
+	mutex_lock(&dev_priv->psr.lock);
+	intel_dp = dev_priv->psr.enabled;
 
-			if (!is_edp_psr(dev))
-				return;
+	if (!intel_dp)
+		goto unlock;
 
-			if (!intel_edp_psr_match_conditions(intel_dp))
-				intel_edp_psr_disable(intel_dp);
-			else
-				if (!intel_edp_is_psr_enabled(dev))
-					intel_edp_psr_do_enable(intel_dp);
-		}
+	/*
+	 * The delayed work can race with an invalidate, hence we need to
+	 * recheck. Since psr_flush first clears this and then reschedules,
+	 * we won't ever miss a flush when bailing out here.
+	 */
+	if (dev_priv->psr.busy_frontbuffer_bits)
+		goto unlock;
+
+	intel_edp_psr_do_enable(intel_dp);
+unlock:
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_edp_psr_do_exit(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->psr.active) {
+		u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+		WARN_ON(!(val & EDP_PSR_ENABLE));
+
+		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+		dev_priv->psr.active = false;
+	}
+
+}
+
+void intel_edp_psr_invalidate(struct drm_device *dev,
+			      unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	intel_edp_psr_do_exit(dev);
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_flush(struct drm_device *dev,
+			 unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+	/*
+	 * On Haswell, sprite plane updates don't result in a psr invalidating
+	 * signal in the hardware, which means we need to fake this manually in
+	 * software for all flushes, not just when we've seen a preceding
+	 * invalidation through frontbuffer rendering.
+	 */
+	if (IS_HASWELL(dev) &&
+	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+		intel_edp_psr_do_exit(dev);
+
+	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+		schedule_delayed_work(&dev_priv->psr.work,
+				      msecs_to_jiffies(100));
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+	mutex_init(&dev_priv->psr.lock);
 }
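+
+/*
+ * Illustrative sketch (not part of the original patch) of the PSR state
+ * flow the functions above implement:
+ *
+ *	intel_edp_psr_enable()      - record the panel in psr.enabled
+ *	intel_edp_psr_invalidate()  - frontbuffer write starts: exit PSR,
+ *				      mark the affected planes busy
+ *	intel_edp_psr_flush()       - writes done: clear busy bits and, once
+ *				      none remain, schedule psr.work
+ *	intel_edp_psr_work()        - delayed work: re-arm PSR if still idle
+ *	intel_edp_psr_disable()     - tear down and cancel the delayed work
+ */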
 
 static void intel_disable_dp(struct intel_encoder *encoder)
@@ -2152,6 +2277,70 @@
 	vlv_wait_port_ready(dev_priv, dport);
 }
 
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 val;
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* program left/right clock distribution */
+	if (pipe != PIPE_B) {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+		if (ch == DPIO_CH0)
+			val |= CHV_BUFLEFTENA1_FORCE;
+		if (ch == DPIO_CH1)
+			val |= CHV_BUFRIGHTENA1_FORCE;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+	} else {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+		if (ch == DPIO_CH0)
+			val |= CHV_BUFLEFTENA2_FORCE;
+		if (ch == DPIO_CH1)
+			val |= CHV_BUFRIGHTENA2_FORCE;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+	}
+
+	/* program clock channel usage */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+	if (pipe != PIPE_B)
+		val &= ~CHV_PCS_USEDCLKCHANNEL;
+	else
+		val |= CHV_PCS_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+	if (pipe != PIPE_B)
+		val &= ~CHV_PCS_USEDCLKCHANNEL;
+	else
+		val |= CHV_PCS_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+	/*
+	 * This is a bit weird since generally CL
+	 * matches the pipe, but here we need to
+	 * pick the CL based on the port.
+	 */
+	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+	if (pipe != PIPE_B)
+		val &= ~CHV_CMN_USEDCLKCHANNEL;
+	else
+		val |= CHV_CMN_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
 /*
  * Native read with retry for link status and receiver capability reads for
  * cases where the sink may still be asleep.
@@ -2189,18 +2378,14 @@
 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }
 
-/*
- * These are source-specific values; current Intel hardware supports
- * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
- */
-
+/* These are source-specific values. */
 static uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
+	if (IS_VALLEYVIEW(dev))
 		return DP_TRAIN_VOLTAGE_SWING_1200;
 	else if (IS_GEN7(dev) && port == PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_800;
@@ -2216,18 +2401,7 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (IS_BROADWELL(dev)) {
-		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-		case DP_TRAIN_VOLTAGE_SWING_400:
-		case DP_TRAIN_VOLTAGE_SWING_600:
-			return DP_TRAIN_PRE_EMPHASIS_6;
-		case DP_TRAIN_VOLTAGE_SWING_800:
-			return DP_TRAIN_PRE_EMPHASIS_3_5;
-		case DP_TRAIN_VOLTAGE_SWING_1200:
-		default:
-			return DP_TRAIN_PRE_EMPHASIS_0;
-		}
-	} else if (IS_HASWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_400:
 			return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2699,41 +2873,6 @@
 	}
 }
 
-static uint32_t
-intel_bdw_signal_levels(uint8_t train_set)
-{
-	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-					 DP_TRAIN_PRE_EMPHASIS_MASK);
-	switch (signal_levels) {
-	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
-	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
-		return DDI_BUF_EMP_400MV_3_5DB_BDW;	/* Sel1 */
-	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-		return DDI_BUF_EMP_400MV_6DB_BDW;	/* Sel2 */
-
-	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
-		return DDI_BUF_EMP_600MV_0DB_BDW;	/* Sel3 */
-	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-		return DDI_BUF_EMP_600MV_3_5DB_BDW;	/* Sel4 */
-	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
-		return DDI_BUF_EMP_600MV_6DB_BDW;	/* Sel5 */
-
-	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-		return DDI_BUF_EMP_800MV_0DB_BDW;	/* Sel6 */
-	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
-		return DDI_BUF_EMP_800MV_3_5DB_BDW;	/* Sel7 */
-
-	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
-		return DDI_BUF_EMP_1200MV_0DB_BDW;	/* Sel8 */
-
-	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
-			      "0x%x\n", signal_levels);
-		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
-	}
-}
-
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2744,10 +2883,7 @@
 	uint32_t signal_levels, mask;
 	uint8_t train_set = intel_dp->train_set[0];
 
-	if (IS_BROADWELL(dev)) {
-		signal_levels = intel_bdw_signal_levels(train_set);
-		mask = DDI_BUF_EMP_MASK;
-	} else if (IS_HASWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		signal_levels = intel_hsw_signal_levels(train_set);
 		mask = DDI_BUF_EMP_MASK;
 	} else if (IS_CHERRYVIEW(dev)) {
@@ -3246,6 +3382,33 @@
 	edp_panel_vdd_off(intel_dp, false);
 }
 
+static bool
+intel_dp_probe_mst(struct intel_dp *intel_dp)
+{
+	u8 buf[1];
+
+	if (!intel_dp->can_mst)
+		return false;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
+		return false;
+
+	_edp_panel_vdd_on(intel_dp);
+	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+		if (buf[0] & DP_MST_CAP) {
+			DRM_DEBUG_KMS("Sink is MST capable\n");
+			intel_dp->is_mst = true;
+		} else {
+			DRM_DEBUG_KMS("Sink is not MST capable\n");
+			intel_dp->is_mst = false;
+		}
+	}
+	edp_panel_vdd_off(intel_dp, false);
+
+	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+	return intel_dp->is_mst;
+}
+
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -3283,6 +3446,20 @@
 				       sink_irq_vector, 1) == 1;
 }
 
+static bool
+intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+	int ret;
+
+	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+					     DP_SINK_COUNT_ESI,
+					     sink_irq_vector, 14);
+	if (ret != 14)
+		return false;
+
+	return true;
+}
+
 static void
 intel_dp_handle_test_request(struct intel_dp *intel_dp)
 {
@@ -3290,6 +3467,63 @@
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
 }
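+
+/*
+ * Service an MST short-pulse interrupt: retrain the link if channel EQ has
+ * dropped, then hand the ESI vector to the topology manager, looping while
+ * it keeps handling events. Returns a negative error code if the sink is
+ * not (or no longer) in MST mode.
+ */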
 
+static int
+intel_dp_check_mst_status(struct intel_dp *intel_dp)
+{
+	bool bret;
+
+	if (intel_dp->is_mst) {
+		u8 esi[16] = { 0 };
+		int ret = 0;
+		int retry;
+		bool handled;
+		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+go_again:
+		if (bret == true) {
+
+			/* check link status - esi[10] = 0x200c */
+			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
+				intel_dp_start_link_train(intel_dp);
+				intel_dp_complete_link_train(intel_dp);
+				intel_dp_stop_link_train(intel_dp);
+			}
+
+			DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+
+			if (handled) {
+				for (retry = 0; retry < 3; retry++) {
+					int wret;
+					wret = drm_dp_dpcd_write(&intel_dp->aux,
+								 DP_SINK_COUNT_ESI+1,
+								 &esi[1], 3);
+					if (wret == 3) {
+						break;
+					}
+				}
+
+				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+				if (bret == true) {
+					DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+					goto go_again;
+				}
+			} else
+				ret = 0;
+
+			return ret;
+		} else {
+			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
+			intel_dp->is_mst = false;
+			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+			/* send a hotplug event */
+			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+		}
+	}
+	return -EINVAL;
+}
+
 /*
  * According to DP spec
  * 5.1.2:
@@ -3298,7 +3532,6 @@
  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
  *  4. Check link status on receipt of hot-plug interrupt
  */
-
 void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
@@ -3518,8 +3751,7 @@
 	enum drm_connector_status status;
 	enum intel_display_power_domain power_domain;
 	struct edid *edid = NULL;
-
-	intel_runtime_pm_get(dev_priv);
+	bool ret;
 
 	power_domain = intel_display_port_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
@@ -3527,6 +3759,14 @@
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 
+	if (intel_dp->is_mst) {
+		/* MST devices are disconnected from a monitor POV */
+		if (intel_encoder->type != INTEL_OUTPUT_EDP)
+			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		status = connector_status_disconnected;
+		goto out;
+	}
+
 	intel_dp->has_audio = false;
 
 	if (HAS_PCH_SPLIT(dev))
@@ -3539,6 +3779,16 @@
 
 	intel_dp_probe_oui(intel_dp);
 
+	ret = intel_dp_probe_mst(intel_dp);
+	if (ret) {
+		/*
+		 * If we are in MST mode then this connector won't appear
+		 * connected or have any EDID on it.
+		 */
+		if (intel_encoder->type != INTEL_OUTPUT_EDP)
+			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		status = connector_status_disconnected;
+		goto out;
+	}
+
 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
 		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
 	} else {
@@ -3555,9 +3805,6 @@
 
 out:
 	intel_display_power_put(dev_priv, power_domain);
-
-	intel_runtime_pm_put(dev_priv);
-
 	return status;
 }
 
@@ -3734,6 +3981,7 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
 	drm_dp_aux_unregister(&intel_dp->aux);
+	intel_dp_mst_encoder_cleanup(intel_dig_port);
 	drm_encoder_cleanup(encoder);
 	if (is_edp(intel_dp)) {
 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -3766,12 +4014,62 @@
 	.destroy = intel_dp_encoder_destroy,
 };
 
-static void
+void
 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	return;
+}
 
-	intel_dp_check_link_status(intel_dp);
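+/*
+ * Handle a hotplug pulse on a digital port. Returns false when the pulse
+ * was fully handled here (MST event or short-pulse link check), true when
+ * the caller should fall back to the full hotplug processing path.
+ */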
+bool
+intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+{
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+
+	DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
+		      long_hpd ? "long" : "short");
+
+	if (long_hpd) {
+		if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+			goto mst_fail;
+
+		if (!intel_dp_get_dpcd(intel_dp))
+			goto mst_fail;
+
+		intel_dp_probe_oui(intel_dp);
+
+		if (!intel_dp_probe_mst(intel_dp))
+			goto mst_fail;
+
+	} else {
+		if (intel_dp->is_mst) {
+			ret = intel_dp_check_mst_status(intel_dp);
+			if (ret == -EINVAL)
+				goto mst_fail;
+		}
+
+		if (!intel_dp->is_mst) {
+			/*
+			 * We'll check the link status via the normal hot plug
+			 * path later, but for short hpds we should check it now.
+			 */
+			intel_dp_check_link_status(intel_dp);
+		}
+	}
+	return false;
+mst_fail:
+	/* If we were in MST mode and the device is no longer there, get out of MST mode */
+	if (intel_dp->is_mst) {
+		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+		intel_dp->is_mst = false;
+		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+	}
+	return true;
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -3822,7 +4120,7 @@
 	return false;
 }
 
-static void
+void
 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -4035,6 +4333,11 @@
 		return;
 	}
 
+	/*
+	 * FIXME: This needs proper synchronization with psr state. But it's
+	 * really hard to tell without seeing the user of this code.
+	 * Check locking and ordering once that lands.
+	 */
 	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
 		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
 		return;
@@ -4288,7 +4591,7 @@
 			  edp_panel_vdd_work);
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	if (HAS_DDI(dev))
 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -4321,7 +4624,12 @@
 
 	intel_dp_aux_init(intel_dp, intel_connector);
 
-	intel_dp->psr_setup_done = false;
+	/* init MST on ports that can support it */
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		if (port == PORT_B || port == PORT_C || port == PORT_D) {
+			intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
+		}
+	}
 
 	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
 		drm_dp_aux_unregister(&intel_dp->aux);
@@ -4331,7 +4639,7 @@
 			edp_panel_vdd_off_sync(intel_dp);
 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
 		}
-		drm_sysfs_connector_remove(connector);
+		drm_connector_unregister(connector);
 		drm_connector_cleanup(connector);
 		return false;
 	}
@@ -4353,6 +4661,7 @@
 void
 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *intel_dig_port;
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
@@ -4379,6 +4688,7 @@
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
 	intel_encoder->get_config = intel_dp_get_config;
 	if (IS_CHERRYVIEW(dev)) {
+		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
 		intel_encoder->pre_enable = chv_pre_enable_dp;
 		intel_encoder->enable = vlv_enable_dp;
 		intel_encoder->post_disable = chv_post_disable_dp;
@@ -4408,9 +4718,55 @@
 	intel_encoder->cloneable = 0;
 	intel_encoder->hot_plug = intel_dp_hot_plug;
 
+	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+	dev_priv->hpd_irq_port[port] = intel_dig_port;
+
 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
 		drm_encoder_cleanup(encoder);
 		kfree(intel_dig_port);
 		kfree(intel_connector);
 	}
 }
+
+void intel_dp_mst_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	/* disable MST */
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+		if (!intel_dig_port)
+			continue;
+
+		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+			if (!intel_dig_port->dp.can_mst)
+				continue;
+			if (intel_dig_port->dp.is_mst)
+				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+		}
+	}
+}
+
+void intel_dp_mst_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+		if (!intel_dig_port)
+			continue;
+		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+			int ret;
+
+			if (!intel_dig_port->dp.can_mst)
+				continue;
+
+			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+			if (ret != 0) {
+				intel_dp_check_mst_status(&intel_dig_port->dp);
+			}
+		}
+	}
+}
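
The suspend/resume pair above brackets MST sideband traffic around system
sleep. A hedged sketch of where the two calls would sit in a driver's pm
path (the surrounding function names are placeholders, not i915_drv.c
code):

/* Sketch: quiesce MST before interrupts go away, re-sync afterwards. */
static int driver_suspend_sketch(struct drm_device *dev)
{
	intel_dp_mst_suspend(dev);
	intel_runtime_pm_disable_interrupts(dev);
	return 0;
}

static int driver_resume_sketch(struct drm_device *dev)
{
	intel_runtime_pm_restore_interrupts(dev);
	intel_dp_mst_resume(dev);	/* falls back to a teardown on failure */
	return 0;
}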
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
new file mode 100644
index 0000000..d9a7a78
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *             2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+					struct intel_crtc_config *pipe_config)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = encoder->base.dev;
+	int bpp;
+	int lane_count, slots;
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	struct intel_connector *found = NULL, *intel_connector;
+	int mst_pbn;
+
+	pipe_config->dp_encoder_is_mst = true;
+	pipe_config->has_pch_encoder = false;
+	pipe_config->has_dp_encoder = true;
+	bpp = 24;
+	/*
+	 * for MST we always configure max link bw - the spec doesn't
+	 * seem to suggest we should do otherwise.
+	 */
+	lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+	intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+	intel_dp->lane_count = lane_count;
+
+	pipe_config->pipe_bpp = 24;
+	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+
+	list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+		if (intel_connector->new_encoder == encoder) {
+			found = intel_connector;
+			break;
+		}
+	}
+
+	if (!found) {
+		DRM_ERROR("can't find connector\n");
+		return false;
+	}
+
+	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+
+	pipe_config->pbn = mst_pbn;
+	slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
+
+	intel_link_compute_m_n(bpp, lane_count,
+			       adjusted_mode->crtc_clock,
+			       pipe_config->port_clock,
+			       &pipe_config->dp_m_n);
+
+	pipe_config->dp_m_n.tu = slots;
+	return true;
+}
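
drm_dp_calc_pbn_mode() converts the pixel clock and bpp into PBN, the MST
bandwidth unit (54/64 MTP slots with a 0.6% margin). A standalone model of
the arithmetic - an approximation of the kernel's fixed-point version, for
illustration only:

#include <stdio.h>

/* Rough model of drm_dp_calc_pbn_mode(); plain 64-bit math stands in for
 * the kernel's fixed-point helpers. */
static int calc_pbn_sketch(int clock_khz, int bpp)
{
	long long kbps = (long long)clock_khz * bpp;
	long long denom = 54 * 8 * 1000 * 1000LL;

	return (int)((kbps * 64 * 1006 + denom - 1) / denom);	/* ceil */
}

int main(void)
{
	/* 1080p60 (148.5 MHz) at 24bpp comes out to roughly 532 PBN */
	printf("pbn = %d\n", calc_pbn_sketch(148500, 24));
	return 0;
}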
+
+static void intel_mst_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	int ret;
+
+	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+
+	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+	if (ret) {
+		DRM_ERROR("failed to update payload %d\n", ret);
+	}
+}
+
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+	/* this can fail */
+	drm_dp_check_act_status(&intel_dp->mst_mgr);
+	/* and this can also fail */
+	drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+
+	intel_dp->active_mst_links--;
+	intel_mst->port = NULL;
+	if (intel_dp->active_mst_links == 0) {
+		intel_dig_port->base.post_disable(&intel_dig_port->base);
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	}
+}
+
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	int ret;
+	uint32_t temp;
+	struct intel_connector *found = NULL, *intel_connector;
+	int slots;
+	struct drm_crtc *crtc = encoder->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+		if (intel_connector->new_encoder == encoder) {
+			found = intel_connector;
+			break;
+		}
+	}
+
+	if (!found) {
+		DRM_ERROR("can't find connector\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+	intel_mst->port = found->port;
+
+	if (intel_dp->active_mst_links == 0) {
+		enum port port = intel_ddi_get_encoder_port(encoder);
+
+		I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
+
+		intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
+
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+		intel_dp_start_link_train(intel_dp);
+		intel_dp_complete_link_train(intel_dp);
+		intel_dp_stop_link_train(intel_dp);
+	}
+
+	ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
+				       intel_mst->port, intel_crtc->config.pbn, &slots);
+	if (!ret) {
+		DRM_ERROR("failed to allocate vcpi\n");
+		return;
+	}
+
+	intel_dp->active_mst_links++;
+	temp = I915_READ(DP_TP_STATUS(port));
+	I915_WRITE(DP_TP_STATUS(port), temp);
+
+	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+}
+
+static void intel_mst_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	int ret;
+
+	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
+		     1))
+		DRM_ERROR("Timed out waiting for ACT sent\n");
+
+	ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+
+	ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+}
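
The enable path above follows the MST payload handshake: allocate a VCPI,
write the payload table (part 1), wait for the sink to signal ACT, then
enable the payload (part 2). Condensed into one sketch using the same
drm_dp helpers, with error handling trimmed for brevity:

/* Sketch of the bring-up sequence; not a substitute for the split
 * pre_enable/enable hooks above. */
static int mst_payload_bringup_sketch(struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_mst_port *port, int pbn)
{
	int slots;

	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
		return -ENOSPC;			/* no VC slots left */

	drm_dp_update_payload_part1(mgr);	/* write payload table */
	drm_dp_check_act_status(mgr);		/* wait for ACT */
	drm_dp_update_payload_part2(mgr);	/* send ALLOCATE_PAYLOAD */
	return 0;
}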
+
+static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+				      enum pipe *pipe)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	*pipe = intel_mst->pipe;
+	if (intel_mst->port)
+		return true;
+	return false;
+}
+
+static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+					struct intel_crtc_config *pipe_config)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+	u32 temp, flags = 0;
+
+	pipe_config->has_dp_encoder = true;
+
+	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	if (temp & TRANS_DDI_PHSYNC)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	if (temp & TRANS_DDI_PVSYNC)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	switch (temp & TRANS_DDI_BPC_MASK) {
+	case TRANS_DDI_BPC_6:
+		pipe_config->pipe_bpp = 18;
+		break;
+	case TRANS_DDI_BPC_8:
+		pipe_config->pipe_bpp = 24;
+		break;
+	case TRANS_DDI_BPC_10:
+		pipe_config->pipe_bpp = 30;
+		break;
+	case TRANS_DDI_BPC_12:
+		pipe_config->pipe_bpp = 36;
+		break;
+	default:
+		break;
+	}
+	pipe_config->adjusted_mode.flags |= flags;
+	intel_dp_get_m_n(crtc, pipe_config);
+
+	intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
+}
+
+static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
+	struct edid *edid;
+	int ret;
+
+	edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
+	if (!edid)
+		return 0;
+
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
+}
+
+static enum drm_connector_status
+intel_mst_port_dp_detect(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
+
+	return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
+}
+
+static enum drm_connector_status
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+	enum drm_connector_status status;
+	status = intel_mst_port_dp_detect(connector);
+	return status;
+}
+
+static int
+intel_dp_mst_set_property(struct drm_connector *connector,
+			  struct drm_property *property,
+			  uint64_t val)
+{
+	return 0;
+}
+
+static void
+intel_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		kfree(intel_connector->edid);
+
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+	.dpms = intel_connector_dpms,
+	.detect = intel_dp_mst_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_dp_mst_set_property,
+	.destroy = intel_dp_mst_connector_destroy,
+};
+
+static int intel_dp_mst_get_modes(struct drm_connector *connector)
+{
+	return intel_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+intel_dp_mst_mode_valid(struct drm_connector *connector,
+			struct drm_display_mode *mode)
+{
+	/* TODO - validate mode against available PBN for link */
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_H_ILLEGAL;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
+	return &intel_dp->mst_encoders[0]->base.base;
+}
+
+static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
+	.get_modes = intel_dp_mst_get_modes,
+	.mode_valid = intel_dp_mst_mode_valid,
+	.best_encoder = intel_mst_best_encoder,
+};
+
+static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_mst);
+}
+
+static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
+	.destroy = intel_dp_mst_encoder_destroy,
+};
+
+static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+{
+	if (connector->encoder) {
+		enum pipe pipe;
+		if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+			return false;
+		return true;
+	}
+	return false;
+}
+
+static void intel_connector_add_to_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
+{
+	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	int i;
+
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+	if (!intel_connector)
+		return NULL;
+
+	connector = &intel_connector->base;
+	drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+
+	intel_connector->unregister = intel_connector_unregister;
+	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+	intel_connector->mst_port = intel_dp;
+	intel_connector->port = port;
+
+	for (i = PIPE_A; i <= PIPE_C; i++) {
+		drm_mode_connector_attach_encoder(&intel_connector->base,
+						  &intel_dp->mst_encoders[i]->base.base);
+	}
+	intel_dp_add_properties(intel_dp, connector);
+
+	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+	drm_mode_connector_set_path_property(connector, pathprop);
+	drm_reinit_primary_mode_group(dev);
+	mutex_lock(&dev->mode_config.mutex);
+	intel_connector_add_to_fbdev(intel_connector);
+	mutex_unlock(&dev->mode_config.mutex);
+	drm_connector_register(&intel_connector->base);
+	return connector;
+}
+
+static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+					   struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
+	/* need to nuke the connector */
+	mutex_lock(&dev->mode_config.mutex);
+	intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	intel_connector->unregister(intel_connector);
+
+	mutex_lock(&dev->mode_config.mutex);
+	intel_connector_remove_from_fbdev(intel_connector);
+	drm_connector_cleanup(connector);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	drm_reinit_primary_mode_group(dev);
+
+	kfree(intel_connector);
+	DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+
+	drm_kms_helper_hotplug_event(dev);
+}
+
+static struct drm_dp_mst_topology_cbs mst_cbs = {
+	.add_connector = intel_dp_add_mst_connector,
+	.destroy_connector = intel_dp_destroy_mst_connector,
+	.hotplug = intel_dp_mst_hotplug,
+};
+
+static struct intel_dp_mst_encoder *
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+{
+	struct intel_dp_mst_encoder *intel_mst;
+	struct intel_encoder *intel_encoder;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+
+	intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
+
+	if (!intel_mst)
+		return NULL;
+
+	intel_mst->pipe = pipe;
+	intel_encoder = &intel_mst->base;
+	intel_mst->primary = intel_dig_port;
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+			 DRM_MODE_ENCODER_DPMST);
+
+	intel_encoder->type = INTEL_OUTPUT_DP_MST;
+	intel_encoder->crtc_mask = 0x7;
+	intel_encoder->cloneable = 0;
+
+	intel_encoder->compute_config = intel_dp_mst_compute_config;
+	intel_encoder->disable = intel_mst_disable_dp;
+	intel_encoder->post_disable = intel_mst_post_disable_dp;
+	intel_encoder->pre_enable = intel_mst_pre_enable_dp;
+	intel_encoder->enable = intel_mst_enable_dp;
+	intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
+	intel_encoder->get_config = intel_dp_mst_enc_get_config;
+
+	return intel_mst;
+}
+
+static bool
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+{
+	int i;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	for (i = PIPE_A; i <= PIPE_C; i++)
+		intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+	return true;
+}
+
+int
+intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+{
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	int ret;
+
+	intel_dp->can_mst = true;
+	intel_dp->mst_mgr.cbs = &mst_cbs;
+
+	/* create encoders */
+	intel_dp_create_fake_mst_encoders(intel_dig_port);
+	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
+	if (ret) {
+		intel_dp->can_mst = false;
+		return ret;
+	}
+	return 0;
+}
+
+void
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	if (!intel_dp->can_mst)
+		return;
+
+	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+	/* encoders will get killed by normal cleanup */
+}
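
intel_dp_mst_encoder_init() passes a 16-byte sideband transaction limit and
3 maximum payloads, matching the three fake encoders (one per pipe) created
above. A hedged sketch of the same setup generalized for a driver with a
different pipe count (the function and parameter names here are
hypothetical):

/* Hypothetical MST manager setup; the 2014-era init signature takes the
 * struct device, the AUX channel, both limits and a connector base id. */
static int mst_mgr_setup_sketch(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_device *dev,
				struct drm_dp_aux *aux,
				struct drm_dp_mst_topology_cbs *cbs,
				int num_pipes, int conn_base_id)
{
	mgr->cbs = cbs;	/* add/destroy connector + hotplug hooks */
	return drm_dp_mst_topology_mgr_init(mgr, dev->dev, aux,
					    16 /* max sideband bytes */,
					    num_pipes /* max payloads */,
					    conn_base_id);
}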
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f67340e..8a475a6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,7 +32,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
 
 /**
  * _wait_for - magic (register) wait macro
@@ -100,6 +100,7 @@
 #define INTEL_OUTPUT_EDP 8
 #define INTEL_OUTPUT_DSI 9
 #define INTEL_OUTPUT_UNKNOWN 10
+#define INTEL_OUTPUT_DP_MST 11
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -165,6 +166,7 @@
 	struct {
 		bool present;
 		u32 level;
+		u32 min;
 		u32 max;
 		bool enabled;
 		bool combination_mode;	/* gen 2/4 only */
@@ -207,6 +209,10 @@
 	/* since POLL and HPD connectors may use the same HPD line keep the native
 	   state of connector->polled in case hotplug storm detection changes it */
 	u8 polled;
+
+	void *port; /* kept opaque, as it's illegal to dereference it */
+
+	struct intel_dp *mst_port;
 };
 
 typedef struct dpll {
@@ -307,6 +313,9 @@
 	/* Selected dpll when shared or DPLL_ID_PRIVATE. */
 	enum intel_dpll_id shared_dpll;
 
+	/* PORT_CLK_SEL for DDI ports. */
+	uint32_t ddi_pll_sel;
+
 	/* Actual register state of the dpll, for shared dpll cross-checking. */
 	struct intel_dpll_hw_state dpll_hw_state;
 
@@ -338,6 +347,7 @@
 		u32 pos;
 		u32 size;
 		bool enabled;
+		bool force_thru;
 	} pch_pfit;
 
 	/* FDI configuration, only valid if has_pch_encoder is set. */
@@ -347,6 +357,9 @@
 	bool ips_enabled;
 
 	bool double_wide;
+
+	bool dp_encoder_is_mst;
+	int pbn;
 };
 
 struct intel_pipe_wm {
@@ -358,6 +371,11 @@
 	bool sprites_scaled;
 };
 
+struct intel_mmio_flip {
+	u32 seqno;
+	u32 ring_id;
+};
+
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
@@ -384,7 +402,6 @@
 
 	struct drm_i915_gem_object *cursor_bo;
 	uint32_t cursor_addr;
-	int16_t cursor_x, cursor_y;
 	int16_t cursor_width, cursor_height;
 	uint32_t cursor_cntl;
 	uint32_t cursor_base;
@@ -394,8 +411,6 @@
 	struct intel_crtc_config *new_config;
 	bool new_enabled;
 
-	uint32_t ddi_pll_sel;
-
 	/* reset counter value when the last flip was submitted */
 	unsigned int reset_counter;
 
@@ -412,10 +427,12 @@
 	wait_queue_head_t vbl_wait;
 
 	int scanline_offset;
+	struct intel_mmio_flip mmio_flip;
 };
 
 struct intel_plane_wm_parameters {
 	uint32_t horiz_pixels;
+	uint32_t vert_pixels;
 	uint8_t bytes_per_pixel;
 	bool enabled;
 	bool scaled;
@@ -428,7 +445,6 @@
 	struct drm_i915_gem_object *obj;
 	bool can_scale;
 	int max_downscale;
-	u32 lut_r[1024], lut_g[1024], lut_b[1024];
 	int crtc_x, crtc_y;
 	unsigned int crtc_w, crtc_h;
 	uint32_t src_x, src_y;
@@ -481,6 +497,7 @@
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 #define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
 
 struct intel_hdmi {
 	u32 hdmi_reg;
@@ -491,6 +508,7 @@
 	bool has_audio;
 	enum hdmi_force_audio force_audio;
 	bool rgb_quant_range_selectable;
+	enum hdmi_picture_aspect aspect_ratio;
 	void (*write_infoframe)(struct drm_encoder *encoder,
 				enum hdmi_infoframe_type type,
 				const void *frame, ssize_t len);
@@ -499,6 +517,7 @@
 			       struct drm_display_mode *adjusted_mode);
 };
 
+struct intel_dp_mst_encoder;
 #define DP_MAX_DOWNSTREAM_PORTS		0x10
 
 /**
@@ -537,12 +556,20 @@
 	unsigned long last_power_cycle;
 	unsigned long last_power_on;
 	unsigned long last_backlight_off;
-	bool psr_setup_done;
+
 	struct notifier_block edp_notifier;
 
 	bool use_tps3;
+	bool can_mst; /* this port supports mst */
+	bool is_mst;
+	int active_mst_links;
+	/* connector directly attached - won't be used for modeset in mst world */
 	struct intel_connector *attached_connector;
 
+	/* mst connector list */
+	struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
+	struct drm_dp_mst_topology_mgr mst_mgr;
+
 	uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
 	/*
 	 * This function returns the value we have to program the AUX_CTL
@@ -566,6 +593,14 @@
 	u32 saved_port_bits;
 	struct intel_dp dp;
 	struct intel_hdmi hdmi;
+	bool (*hpd_pulse)(struct intel_digital_port *, bool);
+};
+
+struct intel_dp_mst_encoder {
+	struct intel_encoder base;
+	enum pipe pipe;
+	struct intel_digital_port *primary;
+	void *port; /* kept opaque, as it's illegal to dereference it */
 };
 
 static inline int
@@ -652,6 +687,12 @@
 	return container_of(encoder, struct intel_digital_port, base.base);
 }
 
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+}
+
 static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
 {
 	return &enc_to_dig_port(encoder)->dp;
@@ -676,17 +717,26 @@
 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
 					   enum transcoder pch_transcoder,
 					   bool enable);
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
 void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * We only use drm_irq_uninstall() at unload and VT switch, so
+	 * this is the only thing we need to check.
+	 */
+	return !dev_priv->pm._irqs_disabled;
+}
+
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
 void i9xx_check_fifo_underruns(struct drm_device *dev);
-
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
 
 /* intel_crt.c */
 void intel_crt_init(struct drm_device *dev);
@@ -705,10 +755,7 @@
 				       enum transcoder cpu_transcoder);
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
 bool intel_ddi_pll_select(struct intel_crtc *crtc);
-void intel_ddi_pll_enable(struct intel_crtc *crtc);
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -716,17 +763,46 @@
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_config *pipe_config);
 
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+			 struct intel_crtc_config *pipe_config);
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 
 /* intel_display.c */
 const char *intel_output_name(int output);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_device *dev);
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-			struct intel_engine_cs *ring);
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			     struct intel_engine_cs *ring);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+				    unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+				     unsigned frontbuffer_bits);
+void intel_frontbuffer_flush(struct drm_device *dev,
+			     unsigned frontbuffer_bits);
+/**
+ * intel_frontbuffer_flip - prepare frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the frontbuffer. This is for
+ * synchronous plane updates which will happen on the next vblank and which will
+ * not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+static inline
+void intel_frontbuffer_flip(struct drm_device *dev,
+			    unsigned frontbuffer_bits)
+{
+	intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_control(struct drm_crtc *crtc, bool enable);
 void intel_crtc_update_dpms(struct drm_crtc *crtc);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 void intel_connector_dpms(struct drm_connector *, int mode);
@@ -767,12 +843,18 @@
 void intel_prepare_page_flip(struct drm_device *dev, int plane);
 void intel_finish_page_flip(struct drm_device *dev, int pipe);
 void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+/* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			struct intel_shared_dpll *pll,
 			bool state);
 #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+void intel_put_shared_dpll(struct intel_crtc *crtc);
+
+/* modesetting asserts */
 void assert_pll(struct drm_i915_private *dev_priv,
 		enum pipe pipe, bool state);
 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -805,7 +887,6 @@
 void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
-int valleyview_get_vco(struct drm_i915_private *dev_priv);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_config *pipe_config);
 int intel_format_to_fourcc(int format);
@@ -826,6 +907,8 @@
 bool intel_dp_compute_config(struct intel_encoder *encoder,
 			     struct intel_crtc_config *pipe_config);
 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+			bool long_hpd);
 void intel_edp_backlight_on(struct intel_dp *intel_dp);
 void intel_edp_backlight_off(struct intel_dp *intel_dp);
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -833,11 +916,24 @@
 void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_edp_psr_enable(struct intel_dp *intel_dp);
 void intel_edp_psr_disable(struct intel_dp *intel_dp);
-void intel_edp_psr_update(struct drm_device *dev);
 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
+void intel_edp_psr_invalidate(struct drm_device *dev,
+			      unsigned frontbuffer_bits);
+void intel_edp_psr_flush(struct drm_device *dev,
+			 unsigned frontbuffer_bits);
+void intel_edp_psr_init(struct drm_device *dev);
 
+int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
+void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
+void intel_dp_mst_suspend(struct drm_device *dev);
+void intel_dp_mst_resume(struct drm_device *dev);
+int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
+/* intel_dp_mst.c */
+int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 /* intel_dsi.c */
-bool intel_dsi_init(struct drm_device *dev);
+void intel_dsi_init(struct drm_device *dev);
 
 
 /* intel_dvo.c */
@@ -920,8 +1016,8 @@
 void intel_gmch_panel_fitting(struct intel_crtc *crtc,
 			      struct intel_crtc_config *pipe_config,
 			      int fitting_mode);
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
-			       u32 max);
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+				    u32 level, u32 max);
 int intel_panel_setup_backlight(struct drm_connector *connector);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
@@ -940,7 +1036,9 @@
 void intel_update_watermarks(struct drm_crtc *crtc);
 void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    struct drm_crtc *crtc,
-				    uint32_t sprite_width, int pixel_size,
+				    uint32_t sprite_width,
+				    uint32_t sprite_height,
+				    int pixel_size,
 				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
 void intel_pm_setup(struct drm_device *dev);
@@ -963,6 +1061,7 @@
 void intel_cleanup_gt_powersave(struct drm_device *dev);
 void intel_enable_gt_powersave(struct drm_device *dev);
 void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_suspend_gt_powersave(struct drm_device *dev);
 void intel_reset_gt_powersave(struct drm_device *dev);
 void ironlake_teardown_rc6(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
@@ -976,8 +1075,7 @@
 void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
 void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-			  enum punit_power_well power_well_id, bool enable);
+
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
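
The frontbuffer declarations above imply an invalidate/flush pairing:
invalidate before frontbuffer writes begin, flush once they have landed so
PSR/FBC/DRRS can be re-enabled. A hedged sketch (passing a NULL ring for
CPU access is an assumption here, not something this header spells out):

/* Sketch of a CPU frontbuffer-write path under the new tracking API. */
static void frontbuffer_cpu_write_sketch(struct drm_device *dev,
					 struct drm_i915_gem_object *obj,
					 unsigned frontbuffer_bits)
{
	intel_fb_obj_invalidate(obj, NULL);	/* pause power features */

	/* ... CPU writes to the frontbuffer object go here ... */

	intel_frontbuffer_flush(dev, frontbuffer_bits);	/* resume them */
}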
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3fd0829..bfcefbf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -658,7 +658,7 @@
 	.fill_modes = drm_helper_probe_single_connector_modes,
 };
 
-bool intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_device *dev)
 {
 	struct intel_dsi *intel_dsi;
 	struct intel_encoder *intel_encoder;
@@ -674,29 +674,29 @@
 
 	/* There is no detection method for MIPI so rely on VBT */
 	if (!dev_priv->vbt.has_mipi)
-		return false;
-
-	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
-	if (!intel_dsi)
-		return false;
-
-	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
-	if (!intel_connector) {
-		kfree(intel_dsi);
-		return false;
-	}
-
-	intel_encoder = &intel_dsi->base;
-	encoder = &intel_encoder->base;
-	intel_dsi->attached_connector = intel_connector;
+		return;
 
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
 	} else {
 		DRM_ERROR("Unsupported Mipi device to reg base");
-		return false;
+		return;
 	}
 
+	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+	if (!intel_dsi)
+		return;
+
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dsi);
+		return;
+	}
+
+	intel_encoder = &intel_dsi->base;
+	encoder = &intel_encoder->base;
+	intel_dsi->attached_connector = intel_connector;
+
 	connector = &intel_connector->base;
 
 	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -743,7 +743,7 @@
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
 	if (!fixed_mode) {
@@ -754,12 +754,10 @@
 	fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 
-	return true;
+	return;
 
 err:
 	drm_encoder_cleanup(&intel_encoder->base);
 	kfree(intel_dsi);
 	kfree(intel_connector);
-
-	return false;
 }
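
The reshuffle above moves the cheap VBT and platform checks ahead of the
allocations, so the early-return paths can no longer leak. The pattern,
reduced to its shape (struct foo and platform_has_foo() are hypothetical):

/* Check-before-allocate: every early return precedes kzalloc(), so no
 * error path needs a kfree(). */
static void init_pattern_sketch(struct drm_device *dev)
{
	struct foo *foo;

	if (!platform_has_foo(dev))
		return;			/* nothing allocated yet */

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return;

	/* later failures unwind with kfree(foo), as in the err: label above */
}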
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 21a0d34..47c7584 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -143,7 +143,7 @@
 	case MIPI_DSI_DCS_LONG_WRITE:
 		dsi_vc_dcs_write(intel_dsi, vc, data, len);
 		break;
-	};
+	}
 
 	data += len;
 
@@ -294,7 +294,8 @@
 	intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
 	intel_dsi->init_count = mipi_config->master_init_timer;
 	intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
-	intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+	intel_dsi->video_frmt_cfg_bits =
+		mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
 
 	switch (intel_dsi->escape_clk_div) {
 	case 0:
@@ -351,7 +352,8 @@
 	 *
 	 * prepare count
 	 */
-	ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
+	ths_prepare_ns = max(mipi_config->ths_prepare,
+			     mipi_config->tclk_prepare);
 	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
 
 	/* exit zero count */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a3631c0..56b47d2 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -112,7 +112,15 @@
 
 static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 {
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+	if (!(tmp & DVO_ENABLE))
+		return false;
 
 	return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
 }
@@ -558,7 +566,7 @@
 			intel_dvo->panel_wants_dither = true;
 		}
 
-		drm_sysfs_connector_add(connector);
+		drm_connector_register(connector);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 088fe93..f475414 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -43,10 +43,36 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct intel_fbdev *ifbdev =
+		container_of(fb_helper, struct intel_fbdev, helper);
+	int ret;
+
+	ret = drm_fb_helper_set_par(info);
+
+	if (ret == 0) {
+		/*
+		 * FIXME: fbdev presumes that all callbacks also work from
+		 * atomic contexts and relies on that for emergency oops
+		 * printing. KMS totally doesn't do that and the locking here is
+		 * by far not the only place this goes wrong.  Ignore this for
+		 * now until we solve this for real.
+		 */
+		mutex_lock(&fb_helper->dev->struct_mutex);
+		ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
+							true);
+		mutex_unlock(&fb_helper->dev->struct_mutex);
+	}
+
+	return ret;
+}
+
 static struct fb_ops intelfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
+	.fb_set_par = intel_fbdev_set_par,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
@@ -81,7 +107,7 @@
 							  sizes->surface_depth);
 
 	size = mode_cmd.pitches[0] * mode_cmd.height;
-	size = ALIGN(size, PAGE_SIZE);
+	size = PAGE_ALIGN(size);
 	obj = i915_gem_object_create_stolen(dev, size);
 	if (obj == NULL)
 		obj = i915_gem_alloc_object(dev, size);
@@ -417,7 +443,7 @@
 		}
 		crtcs[i] = new_crtc;
 
-		DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+		DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
 			      connector->name,
 			      pipe_name(to_intel_crtc(encoder->crtc)->pipe),
 			      encoder->crtc->base.id,
@@ -452,7 +478,7 @@
 	return true;
 }
 
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
 	.initial_config = intel_fb_initial_config,
 	.gamma_set = intel_crtc_fb_gamma_set,
 	.gamma_get = intel_crtc_fb_gamma_get,
@@ -623,7 +649,8 @@
 	if (ifbdev == NULL)
 		return -ENOMEM;
 
-	ifbdev->helper.funcs = &intel_fb_helper_funcs;
+	drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+
 	if (!intel_fbdev_init_bios(dev, ifbdev))
 		ifbdev->preferred_bpp = 32;
 
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index eee2bbe..f9151f6 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -367,6 +367,9 @@
 	union hdmi_infoframe frame;
 	int ret;
 
+	/* Propagate the user-selected picture aspect ratio to the adjusted mode */
+	adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+
 	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
 						       adjusted_mode);
 	if (ret < 0) {
@@ -879,7 +882,7 @@
 	struct intel_encoder *encoder;
 	int count = 0, count_hdmi = 0;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return false;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
@@ -1124,6 +1127,23 @@
 		goto done;
 	}
 
+	if (property == connector->dev->mode_config.aspect_ratio_property) {
+		switch (val) {
+		case DRM_MODE_PICTURE_ASPECT_NONE:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+			break;
+		case DRM_MODE_PICTURE_ASPECT_4_3:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+			break;
+		case DRM_MODE_PICTURE_ASPECT_16_9:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+			break;
+		default:
+			return -EINVAL;
+		}
+		goto done;
+	}
+
 	return -EINVAL;
 
 done:
@@ -1229,6 +1249,70 @@
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 val;
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* program left/right clock distribution */
+	if (pipe != PIPE_B) {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+		if (ch == DPIO_CH0)
+			val |= CHV_BUFLEFTENA1_FORCE;
+		if (ch == DPIO_CH1)
+			val |= CHV_BUFRIGHTENA1_FORCE;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+	} else {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+		if (ch == DPIO_CH0)
+			val |= CHV_BUFLEFTENA2_FORCE;
+		if (ch == DPIO_CH1)
+			val |= CHV_BUFRIGHTENA2_FORCE;
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+	}
+
+	/* program clock channel usage */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+	if (pipe != PIPE_B)
+		val &= ~CHV_PCS_USEDCLKCHANNEL;
+	else
+		val |= CHV_PCS_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+	if (pipe != PIPE_B)
+		val &= ~CHV_PCS_USEDCLKCHANNEL;
+	else
+		val |= CHV_PCS_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+	/*
+	 * This is a bit weird since generally CL
+	 * matches the pipe, but here we need to
+	 * pick the CL based on the port.
+	 */
+	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+	if (pipe != PIPE_B)
+		val &= ~CHV_CMN_USEDCLKCHANNEL;
+	else
+		val |= CHV_CMN_USEDCLKCHANNEL;
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
 static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1416,11 +1500,22 @@
 };
 
 static void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+	if (!drm_mode_create_aspect_ratio_property(connector->dev))
+		drm_object_attach_property(&connector->base,
+			connector->dev->mode_config.aspect_ratio_property,
+			DRM_MODE_PICTURE_ASPECT_NONE);
+}
+
+static void
 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
 {
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
 	intel_hdmi->color_range_auto = true;
+	intel_attach_aspect_ratio_property(connector);
+	intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 }
 
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1467,7 +1562,7 @@
 	if (IS_VALLEYVIEW(dev)) {
 		intel_hdmi->write_infoframe = vlv_write_infoframe;
 		intel_hdmi->set_infoframes = vlv_set_infoframes;
-	} else if (!HAS_PCH_SPLIT(dev)) {
+	} else if (IS_G4X(dev)) {
 		intel_hdmi->write_infoframe = g4x_write_infoframe;
 		intel_hdmi->set_infoframes = g4x_set_infoframes;
 	} else if (HAS_DDI(dev)) {
@@ -1490,7 +1585,7 @@
 	intel_hdmi_add_properties(intel_hdmi, connector);
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
 	 * 0xd.  Failure to do so will result in spurious interrupts being
@@ -1528,6 +1623,7 @@
 	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
 	intel_encoder->get_config = intel_hdmi_get_config;
 	if (IS_CHERRYVIEW(dev)) {
+		intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
 		intel_encoder->pre_enable = chv_hdmi_pre_enable;
 		intel_encoder->enable = vlv_enable_hdmi;
 		intel_encoder->post_disable = chv_hdmi_post_disable;
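
From user space, the new "aspect ratio" connector property is set like any
other connector property. A hedged libdrm sketch - the property id lookup
via drmModeObjectGetProperties() is elided, and set_aspect_16_9() is a
hypothetical helper name:

#include <stdint.h>
#include <xf86drmMode.h>

/* Ask the driver for a 16:9 AVI infoframe on the given connector. */
static int set_aspect_16_9(int fd, uint32_t connector_id, uint32_t prop_id)
{
	return drmModeObjectSetProperty(fd, connector_id,
					DRM_MODE_OBJECT_CONNECTOR, prop_id,
					DRM_MODE_PICTURE_ASPECT_16_9);
}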
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d33b61d..b31088a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,11 +34,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-enum disp_clk {
-	CDCLK,
-	CZCLK
-};
-
 struct gmbus_port {
 	const char *name;
 	int reg;
@@ -63,60 +58,11 @@
 	return container_of(i2c, struct intel_gmbus, adapter);
 }
 
-static int get_disp_clk_div(struct drm_i915_private *dev_priv,
-			    enum disp_clk clk)
-{
-	u32 reg_val;
-	int clk_ratio;
-
-	reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
-
-	if (clk == CDCLK)
-		clk_ratio =
-			((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
-	else
-		clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
-
-	return clk_ratio;
-}
-
-static void gmbus_set_freq(struct drm_i915_private *dev_priv)
-{
-	int vco, gmbus_freq = 0, cdclk_div;
-
-	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
-
-	vco = valleyview_get_vco(dev_priv);
-
-	/* Get the CDCLK divide ratio */
-	cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
-
-	/*
-	 * Program the gmbus_freq based on the cdclk frequency.
-	 * BSpec erroneously claims we should aim for 4MHz, but
-	 * in fact 1MHz is the correct frequency.
-	 */
-	if (cdclk_div)
-		gmbus_freq = (vco << 1) / cdclk_div;
-
-	if (WARN_ON(gmbus_freq == 0))
-		return;
-
-	I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
-}
-
 void
 intel_i2c_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/*
-	 * In BIOS-less system, program the correct gmbus frequency
-	 * before reading edid.
-	 */
-	if (IS_VALLEYVIEW(dev))
-		gmbus_set_freq(dev_priv);
-
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
 }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 5e5a72f..881361c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,6 +51,7 @@
 
 	bool is_dual_link;
 	u32 reg;
+	u32 a3_power;
 
 	struct intel_lvds_connector *attached_connector;
 };
@@ -71,8 +72,13 @@
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	enum intel_display_power_domain power_domain;
 	u32 tmp;
 
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_enabled(dev_priv, power_domain))
+		return false;
+
 	tmp = I915_READ(lvds_encoder->reg);
 
 	if (!(tmp & LVDS_PORT_EN))
@@ -172,8 +178,11 @@
 
 	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
 	 * appropriately here, but we need to look more thoroughly into how
-	 * panels behave in the two modes.
+	 * panels behave in the two modes. For now, let's just maintain the
+	 * value we got from the BIOS.
 	 */
+	temp &= ~LVDS_A3_POWER_MASK;
+	temp |= lvds_encoder->a3_power;
 
 	/* Set the dithering flag on LVDS as needed, note that there is no
 	 * special lvds dither control bit on pch-split platforms, dithering is
@@ -271,7 +280,6 @@
 				      struct intel_crtc_config *pipe_config)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds_encoder *lvds_encoder =
 		to_lvds_encoder(&intel_encoder->base);
 	struct intel_connector *intel_connector =
@@ -286,8 +294,7 @@
 		return false;
 	}
 
-	if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
-	    LVDS_A3_POWER_UP)
+	if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
 		lvds_bpp = 8*3;
 	else
 		lvds_bpp = 6*3;
@@ -1088,6 +1095,9 @@
 	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
 		      lvds_encoder->is_dual_link ? "dual" : "single");
 
+	lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
+				 LVDS_A3_POWER_MASK;
+
 	/*
 	 * Unlock registers and just
 	 * leave them unlocked
@@ -1104,7 +1114,7 @@
 		DRM_DEBUG_KMS("lid notifier registration failed\n");
 		lvds_connector->lid_notifier.notifier_call = NULL;
 	}
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
 	intel_panel_setup_backlight(connector);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4f6b539..ca52ad2 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -352,6 +352,7 @@
 	case INTEL_OUTPUT_UNKNOWN:
 	case INTEL_OUTPUT_DISPLAYPORT:
 	case INTEL_OUTPUT_HDMI:
+	case INTEL_OUTPUT_DP_MST:
 		type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
 		break;
 	case INTEL_OUTPUT_EDP:
@@ -427,7 +428,7 @@
 	 */
 	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
 	list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
-		intel_panel_set_backlight(intel_connector, bclp, 255);
+		intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
 	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index daa1189..dc2f4f26 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -415,6 +415,10 @@
 	}
 
 	intel_overlay_release_old_vid_tail(overlay);
+
+	i915_gem_track_fb(overlay->old_vid_bo, NULL,
+			  INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
 	return 0;
 }
 
@@ -686,6 +690,7 @@
 	bool scale_changed = false;
 	struct drm_device *dev = overlay->dev;
 	u32 swidth, swidthsw, sheight, ostride;
+	enum pipe pipe = overlay->crtc->pipe;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -713,7 +718,7 @@
 		oconfig = OCONF_CC_OUT_8BIT;
 		if (IS_GEN4(overlay->dev))
 			oconfig |= OCONF_CSC_MODE_BT709;
-		oconfig |= overlay->crtc->pipe == 0 ?
+		oconfig |= pipe == 0 ?
 			OCONF_PIPE_A : OCONF_PIPE_B;
 		iowrite32(oconfig, &regs->OCONFIG);
 		intel_overlay_unmap_regs(overlay, regs);
@@ -776,9 +781,15 @@
 	if (ret)
 		goto out_unpin;
 
+	i915_gem_track_fb(overlay->vid_bo, new_bo,
+			  INTEL_FRONTBUFFER_OVERLAY(pipe));
+
 	overlay->old_vid_bo = overlay->vid_bo;
 	overlay->vid_bo = new_bo;
 
+	intel_frontbuffer_flip(dev,
+			       INTEL_FRONTBUFFER_OVERLAY(pipe));
+
 	return 0;
 
 out_unpin:
@@ -1028,7 +1039,7 @@
 	struct drm_intel_overlay_put_image *put_image_rec = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
-	struct drm_mode_object *drmmode_obj;
+	struct drm_crtc *drmmode_crtc;
 	struct intel_crtc *crtc;
 	struct drm_i915_gem_object *new_bo;
 	struct put_image_params *params;
@@ -1057,13 +1068,12 @@
 	if (!params)
 		return -ENOMEM;
 
-	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
-					   DRM_MODE_OBJECT_CRTC);
-	if (!drmmode_obj) {
+	drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
+	if (!drmmode_crtc) {
 		ret = -ENOENT;
 		goto out_free;
 	}
-	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+	crtc = to_intel_crtc(drmmode_crtc);
 
 	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
 						   put_image_rec->bo_handle));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 12b02fe..59b028f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -398,6 +398,69 @@
 	}
 }
 
+/**
+ * scale - scale values from one range to another
+ *
+ * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum legal value for @source_val
+ * @source_max: maximum legal value for @source_val
+ * @target_min: corresponding target value returned for @source_min
+ * @target_max: corresponding target value returned for @source_max
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static uint32_t scale(uint32_t source_val,
+		      uint32_t source_min, uint32_t source_max,
+		      uint32_t target_min, uint32_t target_max)
+{
+	uint64_t target_val;
+
+	WARN_ON(source_min > source_max);
+	WARN_ON(target_min > target_max);
+
+	/* defensive */
+	source_val = clamp(source_val, source_min, source_max);
+
+	/* avoid overflows */
+	target_val = (uint64_t)(source_val - source_min) *
+		(target_max - target_min);
+	do_div(target_val, source_max - source_min);
+	target_val += target_min;
+
+	return target_val;
+}
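
A worked example of the mapping scale() performs, as a standalone program
(plain 64-bit division stands in for do_div(); the hardware range below is
an arbitrary illustration, not a real panel's values):

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_demo(uint32_t v, uint32_t smin, uint32_t smax,
			   uint32_t tmin, uint32_t tmax)
{
	return tmin + (uint64_t)(v - smin) * (tmax - tmin) / (smax - smin);
}

int main(void)
{
	/* user level 128 of 255 onto a hw range [400..7812] -> 4120 */
	printf("%u\n", scale_demo(128, 0, 255, 400, 7812));
	return 0;
}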
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static inline u32 scale_user_to_hw(struct intel_connector *connector,
+				   u32 user_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	return scale(user_level, 0, user_max,
+		     panel->backlight.min, panel->backlight.max);
+}
+
+/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max]. */
+static inline u32 clamp_user_to_hw(struct intel_connector *connector,
+				   u32 user_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+	u32 hw_level;
+
+	hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+	hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+	return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static inline u32 scale_hw_to_user(struct intel_connector *connector,
+				   u32 hw_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	return scale(hw_level, panel->backlight.min, panel->backlight.max,
+		     0, user_max);
+}
+
 static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 					  u32 val)
 {
@@ -557,17 +620,16 @@
 	dev_priv->display.set_backlight(connector, level);
 }
 
-/* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
-			       u32 max)
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(struct intel_connector *connector,
+				      u32 user_level, u32 user_max)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_panel *panel = &connector->panel;
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
-	u32 freq;
+	u32 hw_level;
 	unsigned long flags;
-	u64 n;
 
 	if (!panel->backlight.present || pipe == INVALID_PIPE)
 		return;
@@ -576,18 +638,46 @@
 
 	WARN_ON(panel->backlight.max == 0);
 
-	/* scale to hardware max, but be careful to not overflow */
-	freq = panel->backlight.max;
-	n = (u64)level * freq;
-	do_div(n, max);
-	level = n;
-
-	panel->backlight.level = level;
-	if (panel->backlight.device)
-		panel->backlight.device->props.brightness = level;
+	hw_level = scale_user_to_hw(connector, user_level, user_max);
+	panel->backlight.level = hw_level;
 
 	if (panel->backlight.enabled)
-		intel_panel_actually_set_backlight(connector, level);
+		intel_panel_actually_set_backlight(connector, hw_level);
+
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+/* Set backlight brightness to level in range [0..max], assuming the caller
+ * (the ACPI opregion) has already factored in the hw min, so the result is
+ * scaled to the full hw range and merely clamped to [hw_min..hw_max].
+ */
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+				    u32 user_level, u32 user_max)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 hw_level;
+	unsigned long flags;
+
+	if (!panel->backlight.present || pipe == INVALID_PIPE)
+		return;
+
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+	WARN_ON(panel->backlight.max == 0);
+
+	hw_level = clamp_user_to_hw(connector, user_level, user_max);
+	panel->backlight.level = hw_level;
+
+	if (panel->backlight.device)
+		panel->backlight.device->props.brightness =
+			scale_hw_to_user(connector,
+					 panel->backlight.level,
+					 panel->backlight.device->props.max_brightness);
+
+	if (panel->backlight.enabled)
+		intel_panel_actually_set_backlight(connector, hw_level);
 
 	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 }
@@ -860,7 +950,9 @@
 		panel->backlight.level = panel->backlight.max;
 		if (panel->backlight.device)
 			panel->backlight.device->props.brightness =
-				panel->backlight.level;
+				scale_hw_to_user(connector,
+						 panel->backlight.level,
+						 panel->backlight.device->props.max_brightness);
 	}
 
 	dev_priv->display.enable_backlight(connector);
@@ -889,11 +981,15 @@
 	struct intel_connector *connector = bl_get_data(bd);
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hw_level;
 	int ret;
 
 	intel_runtime_pm_get(dev_priv);
 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	ret = intel_panel_get_backlight(connector);
+
+	hw_level = intel_panel_get_backlight(connector);
+	ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 	intel_runtime_pm_put(dev_priv);
 
@@ -913,12 +1009,19 @@
 	if (WARN_ON(panel->backlight.device))
 		return -ENODEV;
 
-	BUG_ON(panel->backlight.max == 0);
+	WARN_ON(panel->backlight.max == 0);
 
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
-	props.brightness = panel->backlight.level;
+
+	/*
+	 * Note: Everything should work even if the backlight device max
+	 * presented to userspace is arbitrarily chosen.
+	 */
 	props.max_brightness = panel->backlight.max;
+	props.brightness = scale_hw_to_user(connector,
+					    panel->backlight.level,
+					    props.max_brightness);
 
 	/*
 	 * Note: using the same name independent of the connector prevents
@@ -964,6 +1067,19 @@
  * XXX: Query mode clock or hardware clock and program PWM modulation frequency
  * appropriately when it's 0. Use VBT and/or sane defaults.
  */
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+
+	WARN_ON(panel->backlight.max == 0);
+
+	/* vbt value is a coefficient in range [0..255] */
+	return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
+		     0, panel->backlight.max);
+}
+
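The VBT minimum is stored as a 0..255 coefficient, so the effective hw floor
scales with whatever PWM period the platform uses. For example (assuming an
illustrative VBT min_brightness of 10 and a backlight.max of 65535):

	scale(10, 0, 255, 0, 65535) = 10 * 65535 / 255 = 2570
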
 static int bdw_setup_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -979,6 +1095,8 @@
 	if (!panel->backlight.max)
 		return -ENODEV;
 
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
 	val = bdw_get_backlight(connector);
 	panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
@@ -1003,6 +1121,8 @@
 	if (!panel->backlight.max)
 		return -ENODEV;
 
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
 	val = pch_get_backlight(connector);
 	panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
@@ -1035,6 +1155,8 @@
 	if (!panel->backlight.max)
 		return -ENODEV;
 
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
 	val = i9xx_get_backlight(connector);
 	panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
@@ -1062,6 +1184,8 @@
 	if (!panel->backlight.max)
 		return -ENODEV;
 
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
 	val = i9xx_get_backlight(connector);
 	panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
@@ -1099,6 +1223,8 @@
 	if (!panel->backlight.max)
 		return -ENODEV;
 
+	panel->backlight.min = get_backlight_min_vbt(connector);
+
 	val = _vlv_get_backlight(dev, PIPE_A);
 	panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ee728070..3f88f29 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -93,8 +93,7 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int i;
@@ -150,8 +149,7 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
@@ -222,16 +220,26 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-	else
+		break;
+	case 1:
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
 	dpfc_ctl |= DPFC_CTL_FENCE_EN;
 	if (IS_GEN5(dev))
 		dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-	else
+		break;
+	case 1:
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
+
 	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
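
The threshold-to-limit mapping is now duplicated between the ILK and IVB
paths above; as a sketch (hypothetical helper, not part of this patch), it
could be factored as:

	static u32 dpfc_ctl_limit(int threshold)
	{
		switch (threshold) {
		case 4:
		case 3:
			return DPFC_CTL_LIMIT_4X;
		case 2:
			return DPFC_CTL_LIMIT_2X;
		default:
			return DPFC_CTL_LIMIT_1X;
		}
	}
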
@@ -462,7 +481,6 @@
 	struct drm_crtc *crtc = NULL, *tmp_crtc;
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
-	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
@@ -507,8 +525,7 @@
 
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->primary->fb;
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
+	obj = intel_fb_obj(fb);
 	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915.enable_fbc < 0) {
@@ -529,7 +546,10 @@
 		goto out_disable;
 	}
 
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+		max_width = 4096;
+		max_height = 4096;
+	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		max_width = 4096;
 		max_height = 2048;
 	} else {
@@ -563,7 +583,8 @@
 	if (in_dbg_master())
 		goto out_disable;
 
-	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+					      drm_format_plane_cpp(fb->pixel_format, 0))) {
 		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
 			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
 		goto out_disable;
@@ -789,12 +810,33 @@
 	return NULL;
 }
 
-static void pineview_disable_cxsr(struct drm_device *dev)
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	u32 val;
 
-	/* deactivate cxsr */
-	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+	if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+	} else if (IS_PINEVIEW(dev)) {
+		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+		I915_WRITE(DSPFW3, val);
+	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+		I915_WRITE(FW_BLC_SELF, val);
+	} else if (IS_I915GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+		I915_WRITE(INSTPM, val);
+	} else {
+		return;
+	}
+
+	DRM_DEBUG_KMS("memory self-refresh is %s\n",
+		      enable ? "enabled" : "disabled");
 }
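
The watermark update paths below all use this helper with the same pattern:
self-refresh is turned off before the DSPFW registers are reprogrammed, and
only re-enabled afterwards if the newly computed watermarks permit it:

	intel_set_memory_cxsr(dev_priv, false);
	/* ... compute and write DSPFW1/2/3 watermarks ... */
	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
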
 
 /*
@@ -864,95 +906,95 @@
 
 /* Pineview has different values for various configs */
 static const struct intel_watermark_params pineview_display_wm = {
-	PINEVIEW_DISPLAY_FIFO,
-	PINEVIEW_MAX_WM,
-	PINEVIEW_DFT_WM,
-	PINEVIEW_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
+	.fifo_size = PINEVIEW_DISPLAY_FIFO,
+	.max_wm = PINEVIEW_MAX_WM,
+	.default_wm = PINEVIEW_DFT_WM,
+	.guard_size = PINEVIEW_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_display_hplloff_wm = {
-	PINEVIEW_DISPLAY_FIFO,
-	PINEVIEW_MAX_WM,
-	PINEVIEW_DFT_HPLLOFF_WM,
-	PINEVIEW_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
+	.fifo_size = PINEVIEW_DISPLAY_FIFO,
+	.max_wm = PINEVIEW_MAX_WM,
+	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+	.guard_size = PINEVIEW_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_cursor_wm = {
-	PINEVIEW_CURSOR_FIFO,
-	PINEVIEW_CURSOR_MAX_WM,
-	PINEVIEW_CURSOR_DFT_WM,
-	PINEVIEW_CURSOR_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE,
+	.fifo_size = PINEVIEW_CURSOR_FIFO,
+	.max_wm = PINEVIEW_CURSOR_MAX_WM,
+	.default_wm = PINEVIEW_CURSOR_DFT_WM,
+	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
-	PINEVIEW_CURSOR_FIFO,
-	PINEVIEW_CURSOR_MAX_WM,
-	PINEVIEW_CURSOR_DFT_WM,
-	PINEVIEW_CURSOR_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
+	.fifo_size = PINEVIEW_CURSOR_FIFO,
+	.max_wm = PINEVIEW_CURSOR_MAX_WM,
+	.default_wm = PINEVIEW_CURSOR_DFT_WM,
+	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params g4x_wm_info = {
-	G4X_FIFO_SIZE,
-	G4X_MAX_WM,
-	G4X_MAX_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
+	.fifo_size = G4X_FIFO_SIZE,
+	.max_wm = G4X_MAX_WM,
+	.default_wm = G4X_MAX_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params g4x_cursor_wm_info = {
-	I965_CURSOR_FIFO,
-	I965_CURSOR_MAX_WM,
-	I965_CURSOR_DFT_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = I965_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params valleyview_wm_info = {
-	VALLEYVIEW_FIFO_SIZE,
-	VALLEYVIEW_MAX_WM,
-	VALLEYVIEW_MAX_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
+	.fifo_size = VALLEYVIEW_FIFO_SIZE,
+	.max_wm = VALLEYVIEW_MAX_WM,
+	.default_wm = VALLEYVIEW_MAX_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params valleyview_cursor_wm_info = {
-	I965_CURSOR_FIFO,
-	VALLEYVIEW_CURSOR_MAX_WM,
-	I965_CURSOR_DFT_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i965_cursor_wm_info = {
-	I965_CURSOR_FIFO,
-	I965_CURSOR_MAX_WM,
-	I965_CURSOR_DFT_WM,
-	2,
-	I915_FIFO_LINE_SIZE,
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = I965_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i945_wm_info = {
-	I945_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I915_FIFO_LINE_SIZE
+	.fifo_size = I945_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i915_wm_info = {
-	I915_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I915_FIFO_LINE_SIZE
+	.fifo_size = I915_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i830_wm_info = {
-	I855GM_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I830_FIFO_LINE_SIZE
+	.fifo_size = I855GM_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i845_wm_info = {
-	I830_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I830_FIFO_LINE_SIZE
+	.fifo_size = I830_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
 };
 
 /**
@@ -1033,7 +1075,7 @@
 					 dev_priv->fsb_freq, dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-		pineview_disable_cxsr(dev);
+		intel_set_memory_cxsr(dev_priv, false);
 		return;
 	}
 
@@ -1084,13 +1126,9 @@
 		I915_WRITE(DSPFW3, reg);
 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
-		/* activate cxsr */
-		I915_WRITE(DSPFW3,
-			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
-		DRM_DEBUG_KMS("Self-refresh is enabled\n");
+		intel_set_memory_cxsr(dev_priv, true);
 	} else {
-		pineview_disable_cxsr(dev);
-		DRM_DEBUG_KMS("Self-refresh is disabled\n");
+		intel_set_memory_cxsr(dev_priv, false);
 	}
 }
 
@@ -1316,6 +1354,7 @@
 	int plane_sr, cursor_sr;
 	int ignore_plane_sr, ignore_cursor_sr;
 	unsigned int enabled = 0;
+	bool cxsr_enabled;
 
 	vlv_update_drain_latency(dev);
 
@@ -1342,10 +1381,10 @@
 			     &valleyview_wm_info,
 			     &valleyview_cursor_wm_info,
 			     &ignore_plane_sr, &cursor_sr)) {
-		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+		cxsr_enabled = true;
 	} else {
-		I915_WRITE(FW_BLC_SELF_VLV,
-			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
 		plane_sr = cursor_sr = 0;
 	}
 
@@ -1365,6 +1404,9 @@
 	I915_WRITE(DSPFW3,
 		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1375,6 +1417,7 @@
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
+	bool cxsr_enabled;
 
 	if (g4x_compute_wm0(dev, PIPE_A,
 			    &g4x_wm_info, latency_ns,
@@ -1394,10 +1437,10 @@
 			     &g4x_wm_info,
 			     &g4x_cursor_wm_info,
 			     &plane_sr, &cursor_sr)) {
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		cxsr_enabled = true;
 	} else {
-		I915_WRITE(FW_BLC_SELF,
-			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
 		plane_sr = cursor_sr = 0;
 	}
 
@@ -1418,6 +1461,9 @@
 	I915_WRITE(DSPFW3,
 		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1427,6 +1473,7 @@
 	struct drm_crtc *crtc;
 	int srwm = 1;
 	int cursor_sr = 16;
+	bool cxsr_enabled;
 
 	/* Calc sr entries for one plane configs */
 	crtc = single_enabled_crtc(dev);
@@ -1468,13 +1515,11 @@
 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
 			      "cursor %d\n", srwm, cursor_sr);
 
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		cxsr_enabled = true;
 	} else {
+		cxsr_enabled = false;
 		/* Turn off self refresh if both pipes are enabled */
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-				   & ~FW_BLC_SELF_EN);
+		intel_set_memory_cxsr(dev_priv, false);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1486,6 +1531,9 @@
 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
 	/* update cursor SR watermark */
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1545,12 +1593,12 @@
 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
 	if (IS_I915GM(dev) && enabled) {
-		struct intel_framebuffer *fb;
+		struct drm_i915_gem_object *obj;
 
-		fb = to_intel_framebuffer(enabled->primary->fb);
+		obj = intel_fb_obj(enabled->primary->fb);
 
 		/* self-refresh seems busted with untiled */
-		if (fb->obj->tiling_mode == I915_TILING_NONE)
+		if (obj->tiling_mode == I915_TILING_NONE)
 			enabled = NULL;
 	}
 
@@ -1560,10 +1608,7 @@
 	cwm = 2;
 
 	/* Play safe and disable self-refresh before adjusting watermarks. */
-	if (IS_I945G(dev) || IS_I945GM(dev))
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
-	else if (IS_I915GM(dev))
-		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+	intel_set_memory_cxsr(dev_priv, false);
 
 	/* Calc sr entries for one plane configs */
 	if (HAS_FW_BLC(dev) && enabled) {
@@ -1609,17 +1654,8 @@
 	I915_WRITE(FW_BLC, fwater_lo);
 	I915_WRITE(FW_BLC2, fwater_hi);
 
-	if (HAS_FW_BLC(dev)) {
-		if (enabled) {
-			if (IS_I945G(dev) || IS_I945GM(dev))
-				I915_WRITE(FW_BLC_SELF,
-					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-			else if (IS_I915GM(dev))
-				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
-			DRM_DEBUG_KMS("memory self refresh enabled\n");
-		} else
-			DRM_DEBUG_KMS("memory self refresh disabled\n");
-	}
+	if (enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -2707,10 +2743,11 @@
 	ilk_write_wm_values(dev_priv, &results);
 }
 
-static void ilk_update_sprite_wm(struct drm_plane *plane,
-				     struct drm_crtc *crtc,
-				     uint32_t sprite_width, int pixel_size,
-				     bool enabled, bool scaled)
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+		     struct drm_crtc *crtc,
+		     uint32_t sprite_width, uint32_t sprite_height,
+		     int pixel_size, bool enabled, bool scaled)
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2718,6 +2755,7 @@
 	intel_plane->wm.enabled = enabled;
 	intel_plane->wm.scaled = scaled;
 	intel_plane->wm.horiz_pixels = sprite_width;
+	intel_plane->wm.vert_pixels = sprite_height;
 	intel_plane->wm.bytes_per_pixel = pixel_size;
 
 	/*
@@ -2852,13 +2890,16 @@
 
 void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    struct drm_crtc *crtc,
-				    uint32_t sprite_width, int pixel_size,
+				    uint32_t sprite_width,
+				    uint32_t sprite_height,
+				    int pixel_size,
 				    bool enabled, bool scaled)
 {
 	struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
 	if (dev_priv->display.update_sprite_wm)
-		dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+		dev_priv->display.update_sprite_wm(plane, crtc,
+						   sprite_width, sprite_height,
 						   pixel_size, enabled, scaled);
 }
 
@@ -3147,6 +3188,9 @@
 	if (val < dev_priv->rps.max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_THRESHOLD;
 
+	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+	mask &= dev_priv->pm_rps_events;
+
 	/* IVB and SNB hard hangs on looping batchbuffer
 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
 	 */
@@ -3250,7 +3294,9 @@
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
-		if (IS_VALLEYVIEW(dev))
+		if (IS_CHERRYVIEW(dev))
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+		else if (IS_VALLEYVIEW(dev))
 			vlv_set_rps_idle(dev_priv);
 		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3348,6 +3394,15 @@
 		gen6_disable_rps_interrupts(dev);
 }
 
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	gen8_disable_rps_interrupts(dev);
+}
+
 static void valleyview_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3419,7 +3474,7 @@
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir);
-	bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -3430,7 +3485,7 @@
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir);
-	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -3483,15 +3538,23 @@
 	for_each_ring(ring, dev_priv, unused)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
-	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+	else
+		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
 	/* 3: Enable RC6 */
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 	intel_print_rc6_info(dev, rc6_mask);
-	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-				    GEN6_RC_CTL_EI_MODE(1) |
-				    rc6_mask);
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN7_RC_CTL_TO_MODE |
+				rc6_mask);
+	else
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN6_RC_CTL_EI_MODE(1) |
+				rc6_mask);
 
 	/* 4 Program defaults and thresholds for RPS*/
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -3727,7 +3790,57 @@
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp0;
+
+	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+	rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+	return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rpe;
+
+	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+	return rpe;
+}
+
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp1;
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+	return rp1;
+}
+
+static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rpn;
+
+	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+	rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+	return rpn;
+}
+
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp1;
+
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+	return rp1;
+}
+
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
 	u32 val, rp0;
 
@@ -3752,7 +3865,7 @@
 	return rpe;
 }
 
-int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
 {
 	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
 }
@@ -3766,6 +3879,35 @@
 			     dev_priv->vlv_pctx->stolen->start);
 }
 
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long pctx_paddr, paddr;
+	struct i915_gtt *gtt = &dev_priv->gtt;
+	u32 pcbr;
+	int pctx_size = 32*1024;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	pcbr = I915_READ(VLV_PCBR);
+	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+		paddr = (dev_priv->mm.stolen_base +
+			 (gtt->stolen_size - pctx_size));
+
+		pctx_paddr = (paddr & (~4095));
+		I915_WRITE(VLV_PCBR, pctx_paddr);
+	}
+}
+
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3840,6 +3982,11 @@
 			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
 			 dev_priv->rps.efficient_freq);
 
+	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+			 dev_priv->rps.rp1_freq);
+
 	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
 			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3855,11 +4002,142 @@
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	cherryview_setup_pctx(dev);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+			 dev_priv->rps.max_freq);
+
+	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+			 dev_priv->rps.rp1_freq);
+
+	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+			 dev_priv->rps.min_freq);
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
 {
 	valleyview_cleanup_pctx(dev);
 }
 
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+	int i;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	gtfifodbg = I915_READ(GTFIFODBG);
+	if (gtfifodbg) {
+		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+				 gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	cherryview_check_pctx(dev_priv);
+
+	/* 1a & 1b: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* 2a: Program RC6 thresholds.*/
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns */
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+	/* allows RC6 residency counter to work */
+	I915_WRITE(VLV_COUNTER_CONTROL,
+		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+				      VLV_MEDIA_RC6_COUNT_EN |
+				      VLV_RENDER_RC6_COUNT_EN));
+
+	/* For now we assume BIOS is allocating and populating the PCBR */
+	pcbr = I915_READ(VLV_PCBR);
+
+	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+
+	/* 3: Enable RC6 */
+	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+						(pcbr >> VLV_PCBR_ADDR_SHIFT))
+		rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+	/* 4: Program defaults and thresholds for RPS */
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
+	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
+	/* 5: Enable RPS */
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw?) */
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq);
+
+	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+	gen8_enable_rps_interrupts(dev);
+
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
 static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3886,6 +4164,7 @@
 	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
 
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
@@ -3906,9 +4185,11 @@
 
 	/* allows RC6 residency counter to work */
 	I915_WRITE(VLV_COUNTER_CONTROL,
-		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+				      VLV_RENDER_RC0_COUNT_EN |
 				      VLV_MEDIA_RC6_COUNT_EN |
 				      VLV_RENDER_RC6_COUNT_EN));
+
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
@@ -4668,33 +4949,60 @@
 {
 	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 
-	if (IS_VALLEYVIEW(dev))
+	if (IS_CHERRYVIEW(dev))
+		cherryview_init_gt_powersave(dev);
+	else if (IS_VALLEYVIEW(dev))
 		valleyview_init_gt_powersave(dev);
 }
 
 void intel_cleanup_gt_powersave(struct drm_device *dev)
 {
-	if (IS_VALLEYVIEW(dev))
+	if (IS_CHERRYVIEW(dev))
+		return;
+	else if (IS_VALLEYVIEW(dev))
 		valleyview_cleanup_gt_powersave(dev);
 }
 
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Interrupts should be disabled already to avoid re-arming. */
+	WARN_ON(intel_irqs_enabled(dev_priv));
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	cancel_work_sync(&dev_priv->rps.work);
+
+	/* Force GPU to min freq during suspend */
+	gen6_rps_idle(dev_priv);
+}
+
 void intel_disable_gt_powersave(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* Interrupts should be disabled already to avoid re-arming. */
-	WARN_ON(dev->irq_enabled);
+	WARN_ON(intel_irqs_enabled(dev_priv));
 
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
-	} else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
-		if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
-			intel_runtime_pm_put(dev_priv);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		intel_suspend_gt_powersave(dev);
 
-		cancel_work_sync(&dev_priv->rps.work);
 		mutex_lock(&dev_priv->rps.hw_lock);
-		if (IS_VALLEYVIEW(dev))
+		if (IS_CHERRYVIEW(dev))
+			cherryview_disable_rps(dev);
+		else if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
 		else
 			gen6_disable_rps(dev);
@@ -4712,7 +5020,9 @@
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
-	if (IS_VALLEYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev)) {
+		cherryview_enable_rps(dev);
+	} else if (IS_VALLEYVIEW(dev)) {
 		valleyview_enable_rps(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
@@ -4737,7 +5047,7 @@
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
 		mutex_unlock(&dev->struct_mutex);
-	} else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path
@@ -5110,7 +5420,7 @@
 	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
 
 	I915_WRITE(_3D_CHICKEN3,
-		   _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
 
 	I915_WRITE(COMMON_SLICE_CHICKEN2,
 		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5345,10 +5655,6 @@
 	}
 	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
-	dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
-			 dev_priv->vlv_cdclk_freq);
-
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableEarlyCull:vlv */
@@ -5423,6 +5729,35 @@
 static void cherryview_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	switch ((val >> 2) & 0x7) {
+	case 0:
+	case 1:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 2:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 3:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
+		dev_priv->mem_freq = 2000;
+		break;
+	case 4:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 5:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
+		dev_priv->mem_freq = 1600;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -5663,7 +5998,6 @@
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
-	unsigned long irqflags;
 
 	/*
 	 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5679,21 +6013,8 @@
 	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
 
-	if (IS_BROADWELL(dev)) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
-			   dev_priv->de_irq_mask[PIPE_B]);
-		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
-			   ~dev_priv->de_irq_mask[PIPE_B] |
-			   GEN8_PIPE_VBLANK);
-		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
-			   dev_priv->de_irq_mask[PIPE_C]);
-		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
-			   ~dev_priv->de_irq_mask[PIPE_C] |
-			   GEN8_PIPE_VBLANK);
-		POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	if (IS_BROADWELL(dev))
+		gen8_irq_power_well_post_enable(dev_priv);
 }
 
 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -5764,34 +6085,13 @@
 	return true;
 }
 
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-			  enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+			       struct i915_power_well *power_well, bool enable)
 {
-	struct drm_device *dev = dev_priv->dev;
+	enum punit_power_well power_well_id = power_well->data;
 	u32 mask;
 	u32 state;
 	u32 ctrl;
-	enum pipe pipe;
-
-	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-		if (enable) {
-			/*
-			 * Enable the CRI clock source so we can get at the
-			 * display and the reference clock for VGA
-			 * hotplug / manual detection.
-			 */
-			I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-				   DPLL_REFA_CLK_ENABLE_VLV |
-				   DPLL_INTEGRATED_CRI_CLK_VLV);
-			udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-		} else {
-			for_each_pipe(pipe)
-				assert_pll_disabled(dev_priv, pipe);
-			/* Assert common reset */
-			I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
-				   ~DPIO_CMNRST);
-		}
-	}
 
 	mask = PUNIT_PWRGT_MASK(power_well_id);
 	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5819,28 +6119,6 @@
 
 out:
 	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	/*
-	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
-	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
-	 *   b.	The other bits such as sfr settings / modesel may all
-	 *	be set to 0.
-	 *
-	 * This should only be done on init and resume from S3 with
-	 * both PLLs disabled, or we risk losing DPIO and PLL
-	 * synchronization.
-	 */
-	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
-		I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-			       struct i915_power_well *power_well, bool enable)
-{
-	enum punit_power_well power_well_id = power_well->data;
-
-	__vlv_set_power_well(dev_priv, power_well_id, enable);
 }
 
 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5932,6 +6210,53 @@
 	vlv_set_power_well(dev_priv, power_well, false);
 }
 
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+	/*
+	 * Enable the CRI clock source so we can get at the
+	 * display and the reference clock for VGA
+	 * hotplug / manual detection.
+	 */
+	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	/*
+	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
+	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
+	 *   b.	The other bits such as sfr settings / modesel may all
+	 *	be set to 0.
+	 *
+	 * This should only be done on init and resume from S3 with
+	 * both PLLs disabled, or we risk losing DPIO and PLL
+	 * synchronization.
+	 */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+					    struct i915_power_well *power_well)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum pipe pipe;
+
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+	for_each_pipe(pipe)
+		assert_pll_disabled(dev_priv, pipe);
+
+	/* Assert common reset */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+	vlv_set_power_well(dev_priv, power_well, false);
+}
+
 static void check_power_well_state(struct drm_i915_private *dev_priv,
 				   struct i915_power_well *power_well)
 {
@@ -6081,6 +6406,7 @@
 	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_CRT) |			\
+	BIT(POWER_DOMAIN_PLLS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define HSW_DISPLAY_POWER_DOMAINS (				\
 	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
@@ -6180,6 +6506,13 @@
 	.is_enabled = vlv_power_well_enabled,
 };
 
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+	.sync_hw = vlv_power_well_sync_hw,
+	.enable = vlv_dpio_cmn_power_well_enable,
+	.disable = vlv_dpio_cmn_power_well_disable,
+	.is_enabled = vlv_power_well_enabled,
+};
+
 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
 	.sync_hw = vlv_power_well_sync_hw,
 	.enable = vlv_power_well_enable,
@@ -6240,10 +6573,25 @@
 		.name = "dpio-common",
 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
 		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-		.ops = &vlv_dpio_power_well_ops,
+		.ops = &vlv_dpio_cmn_power_well_ops,
 	},
 };
 
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+						 enum punit_power_well power_well_id)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+		if (power_well->data == power_well_id)
+			return power_well;
+	}
+
+	return NULL;
+}
+
 #define set_power_wells(power_domains, __power_wells) ({		\
 	(power_domains)->power_wells = (__power_wells);			\
 	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
@@ -6294,11 +6642,50 @@
 	mutex_unlock(&power_domains->lock);
 }
 
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_well *cmn =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+	struct i915_power_well *disp2d =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+	/* nothing to do if common lane is already off */
+	if (!cmn->ops->is_enabled(dev_priv, cmn))
+		return;
+
+	/* If the display might be already active skip this */
+	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
+		return;
+
+	DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+	/* cmnlane needs DPLL registers */
+	disp2d->ops->enable(dev_priv, disp2d);
+
+	/*
+	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+	 * Need to assert and de-assert PHY SB reset by gating the
+	 * common lane power, then un-gating it.
+	 * Simply ungating isn't enough to reset the PHY enough to get
+	 * ports and lanes running.
+	 */
+	cmn->ops->disable(dev_priv, cmn);
+}
+
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
 	power_domains->initializing = true;
+
+	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+		mutex_lock(&power_domains->lock);
+		vlv_cmnlane_wa(dev_priv);
+		mutex_unlock(&power_domains->lock);
+	}
+
 	/* For now, we need the power well to be always enabled. */
 	intel_display_set_init_power(dev_priv, true);
 	intel_power_domains_resume(dev_priv);
@@ -6471,7 +6858,7 @@
 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
 				 dev_priv->fsb_freq, dev_priv->mem_freq);
 			/* Disable CxSR and never update its watermark again */
-			pineview_disable_cxsr(dev);
+			intel_set_memory_cxsr(dev_priv, false);
 			dev_priv->display.update_wm = NULL;
 		} else
 			dev_priv->display.update_wm = pineview_update_wm;
@@ -6554,7 +6941,7 @@
 	return 0;
 }
 
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	int div;
 
@@ -6576,7 +6963,7 @@
 	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
 }
 
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
 	int mul;
 
@@ -6598,6 +6985,80 @@
 	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
 }
 
+static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+	int div, freq;
+
+	switch (dev_priv->rps.cz_freq) {
+	case 200:
+		div = 5;
+		break;
+	case 267:
+		div = 6;
+		break;
+	case 320:
+	case 333:
+	case 400:
+		div = 8;
+		break;
+	default:
+		return -1;
+	}
+
+	freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+
+	return freq;
+}
+
+static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+	int mul, opcode;
+
+	switch (dev_priv->rps.cz_freq) {
+	case 200:
+		mul = 5;
+		break;
+	case 267:
+		mul = 6;
+		break;
+	case 320:
+	case 333:
+	case 400:
+		mul = 8;
+		break;
+	default:
+		return -1;
+	}
+
+	opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
+
+	return opcode;
+}
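
With a 320 MHz CZ clock (divider 8), the two conversions above are inverses
in 10 MHz steps; an illustrative round trip:

	chv_gpu_freq(40)     = DIV_ROUND_CLOSEST(320 * 40, 2 * 8) / 2
	                     = DIV_ROUND_CLOSEST(12800, 16) / 2 = 400 MHz
	chv_freq_opcode(400) = DIV_ROUND_CLOSEST(400 * 2 * 8, 320) * 2
	                     = DIV_ROUND_CLOSEST(6400, 320) * 2 = 40
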
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+	int ret = -1;
+
+	if (IS_CHERRYVIEW(dev_priv->dev))
+		ret = chv_gpu_freq(dev_priv, val);
+	else if (IS_VALLEYVIEW(dev_priv->dev))
+		ret = byt_gpu_freq(dev_priv, val);
+
+	return ret;
+}
+
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+	int ret = -1;
+
+	if (IS_CHERRYVIEW(dev_priv->dev))
+		ret = chv_freq_opcode(dev_priv, val);
+	else if (IS_VALLEYVIEW(dev_priv->dev))
+		ret = byt_freq_opcode(dev_priv, val);
+
+	return ret;
+}
+
 void intel_pm_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6608,5 +7069,5 @@
 			  intel_gen6_powersave_work);
 
 	dev_priv->pm.suspended = false;
-	dev_priv->pm.irqs_disabled = false;
+	dev_priv->pm._irqs_disabled = false;
 }
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index a5e783a..fd4f662 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -28,7 +28,6 @@
 
 struct intel_renderstate_rodata {
 	const u32 *reloc;
-	const u32 reloc_items;
 	const u32 *batch;
 	const u32 batch_items;
 };
@@ -40,7 +39,6 @@
 #define RO_RENDERSTATE(_g)						\
 	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
 		.reloc = gen ## _g ## _null_state_relocs,		\
-		.reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
 		.batch = gen ## _g ## _null_state_batch,		\
 		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
 	}
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 740538a..56c1429 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -6,6 +6,7 @@
 	0x0000002c,
 	0x000001e0,
 	0x000001e4,
+	-1,
 };
 
 static const u32 gen6_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 6fa7ff2..419e35a 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -5,6 +5,7 @@
 	0x00000010,
 	0x00000018,
 	0x000001ec,
+	-1,
 };
 
 static const u32 gen7_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 5c87561..75ef1b5d 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -5,6 +5,7 @@
 	0x00000050,
 	0x00000060,
 	0x000003ec,
+	-1,
 };
 
 static const u32 gen8_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 279488a..b3d8f76 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,9 +48,8 @@
 	return space;
 }
 
-static inline int ring_space(struct intel_engine_cs *ring)
+static inline int ring_space(struct intel_ringbuffer *ringbuf)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
 }
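
For reference, the unchanged __ring_space() helper above is effectively:

	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;

i.e. the free bytes between tail and head modulo the buffer size, less a
small reserved gap so that a full ring never degenerates to head == tail.
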
 
@@ -545,7 +544,7 @@
 	else {
 		ringbuf->head = I915_READ_HEAD(ring);
 		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ringbuf->space = ring_space(ring);
+		ringbuf->space = ring_space(ringbuf);
 		ringbuf->last_retired_head = -1;
 	}
 
@@ -604,6 +603,8 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = init_ring_common(ring);
+	if (ret)
+		return ret;
 
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
 	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
@@ -658,6 +659,13 @@
 static void render_ring_cleanup(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->semaphore_obj) {
+		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
+		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
+		dev_priv->semaphore_obj = NULL;
+	}
 
 	if (ring->scratch.obj == NULL)
 		return;
@@ -671,29 +679,96 @@
 	ring->scratch.obj = NULL;
 }
 
+static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+			   unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 8
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *waiter;
+	int i, ret, num_rings;
+
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(waiter, dev_priv, i) {
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+			continue;
+
+		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
+					   PIPE_CONTROL_QW_WRITE |
+					   PIPE_CONTROL_FLUSH_ENABLE);
+		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
+		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+		intel_ring_emit(signaller, 0);
+		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+					   MI_SEMAPHORE_TARGET(waiter->id));
+		intel_ring_emit(signaller, 0);
+	}
+
+	return 0;
+}
+
+static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+			   unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 6
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *waiter;
+	int i, ret, num_rings;
+
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(waiter, dev_priv, i) {
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+			continue;
+
+		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
+					   MI_FLUSH_DW_OP_STOREDW);
+		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
+					   MI_FLUSH_DW_USE_GTT);
+		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+					   MI_SEMAPHORE_TARGET(waiter->id));
+		intel_ring_emit(signaller, 0);
+	}
+
+	return 0;
+}
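
The differing MBOX_UPDATE_DWORDS values match the emit sequences exactly:

	/* RCS: 6-dword PIPE_CONTROL + 2-dword MI_SEMAPHORE_SIGNAL = 8/waiter
	 * XCS: 4-dword MI_FLUSH_DW  + 2-dword MI_SEMAPHORE_SIGNAL = 6/waiter
	 */
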
+
 static int gen6_signal(struct intel_engine_cs *signaller,
 		       unsigned int num_dwords)
 {
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;
-	int i, ret;
+	int i, ret, num_rings;
 
-	/* NB: In order to be able to do semaphore MBOX updates for varying
-	 * number of rings, it's easiest if we round up each individual update
-	 * to a multiple of 2 (since ring updates must always be a multiple of
-	 * 2) even though the actual update only requires 3 dwords.
-	 */
-#define MBOX_UPDATE_DWORDS 4
-	if (i915_semaphore_is_enabled(dev))
-		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
-	else
-		return intel_ring_begin(signaller, num_dwords);
+#define MBOX_UPDATE_DWORDS 3
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
+#undef MBOX_UPDATE_DWORDS
 
 	ret = intel_ring_begin(signaller, num_dwords);
 	if (ret)
 		return ret;
-#undef MBOX_UPDATE_DWORDS
 
 	for_each_ring(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -701,15 +776,13 @@
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit(signaller, mbox_reg);
 			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
-			intel_ring_emit(signaller, MI_NOOP);
-		} else {
-			intel_ring_emit(signaller, MI_NOOP);
-			intel_ring_emit(signaller, MI_NOOP);
-			intel_ring_emit(signaller, MI_NOOP);
-			intel_ring_emit(signaller, MI_NOOP);
 		}
 	}
 
+	/* If num_dwords was rounded, make sure the tail pointer is correct */
+	if (num_rings % 2 == 0)
+		intel_ring_emit(signaller, MI_NOOP);
+
 	return 0;
 }
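
The round_up() keeps the allocation an even number of dwords, since the ring
tail must advance in multiples of two; worked through for two ring counts:

	4 rings: (4 - 1) * 3 = 9  -> round_up(9, 2) = 10; 9 dwords emitted,
	         so one trailing MI_NOOP (num_rings even)
	3 rings: (3 - 1) * 3 = 6  -> already even, no fix-up needed
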
 
@@ -727,7 +800,11 @@
 {
 	int ret;
 
-	ret = ring->semaphore.signal(ring, 4);
+	if (ring->semaphore.signal)
+		ret = ring->semaphore.signal(ring, 4);
+	else
+		ret = intel_ring_begin(ring, 4);
+
 	if (ret)
 		return ret;
 
@@ -754,6 +831,32 @@
  * @signaller - ring which has, or will signal
  * @seqno - seqno which the waiter will block on
  */
+
+static int
+gen8_ring_sync(struct intel_engine_cs *waiter,
+	       struct intel_engine_cs *signaller,
+	       u32 seqno)
+{
+	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	int ret;
+
+	ret = intel_ring_begin(waiter, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
+				MI_SEMAPHORE_GLOBAL_GTT |
+				MI_SEMAPHORE_POLL |
+				MI_SEMAPHORE_SAD_GTE_SDD);
+	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter,
+			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+	intel_ring_emit(waiter,
+			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+	intel_ring_advance(waiter);
+	return 0;
+}
+
 static int
 gen6_ring_sync(struct intel_engine_cs *waiter,
 	       struct intel_engine_cs *signaller,
@@ -901,7 +1004,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0)
-		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
@@ -916,7 +1019,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0)
-		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
@@ -1109,7 +1212,7 @@
 					 GT_PARITY_ERROR(dev)));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1129,7 +1232,7 @@
 			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 		else
 			I915_WRITE_IMR(ring, ~0);
-		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1147,7 +1250,7 @@
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1167,7 +1270,7 @@
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		I915_WRITE_IMR(ring, ~0);
-		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1329,6 +1432,7 @@
 	struct drm_i915_gem_object *obj;
 
 	if ((obj = ring->status_page.obj) == NULL) {
+		unsigned flags;
 		int ret;
 
 		obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1341,7 +1445,20 @@
 		if (ret)
 			goto err_unref;
 
-		ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+		flags = 0;
+		if (!HAS_LLC(ring->dev))
+			/* On g33, we cannot place HWS above 256MiB, so
+			 * restrict its pinning to the low mappable arena.
+			 * Though this restriction is not documented for
+			 * gen4, gen5, or byt, they also behave similarly
+			 * and hang if the HWS is placed at the top of the
+			 * GTT. To generalise, it appears that all !llc
+			 * platforms have issues with us placing the HWS
+			 * above the mappable region (even though we never
+			 * actually map it).
+			 */
+			flags |= PIN_MAPPABLE;
+		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
 		if (ret) {
 err_unref:
 			drm_gem_object_unreference(&obj->base);
@@ -1378,15 +1495,25 @@
 	return 0;
 }
 
-static int allocate_ring_buffer(struct intel_engine_cs *ring)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	struct drm_device *dev = ring->dev;
+	if (!ringbuf->obj)
+		return;
+
+	iounmap(ringbuf->virtual_start);
+	i915_gem_object_ggtt_unpin(ringbuf->obj);
+	drm_gem_object_unreference(&ringbuf->obj->base);
+	ringbuf->obj = NULL;
+}
+
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+				      struct intel_ringbuffer *ringbuf)
+{
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (intel_ring_initialized(ring))
+	if (ringbuf->obj)
 		return 0;
 
 	obj = NULL;
@@ -1397,6 +1524,9 @@
 	if (obj == NULL)
 		return -ENOMEM;
 
+	/* mark ring buffers as read-only from GPU side by default */
+	obj->gt_ro = 1;
+
 	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
 	if (ret)
 		goto err_unref;
@@ -1455,7 +1585,7 @@
 			goto error;
 	}
 
-	ret = allocate_ring_buffer(ring);
+	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
 		goto error;
@@ -1496,11 +1626,7 @@
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ringbuf->virtual_start);
-
-	i915_gem_object_ggtt_unpin(ringbuf->obj);
-	drm_gem_object_unreference(&ringbuf->obj->base);
-	ringbuf->obj = NULL;
+	intel_destroy_ringbuffer_obj(ringbuf);
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
 
@@ -1526,7 +1652,7 @@
 		ringbuf->head = ringbuf->last_retired_head;
 		ringbuf->last_retired_head = -1;
 
-		ringbuf->space = ring_space(ring);
+		ringbuf->space = ring_space(ringbuf);
 		if (ringbuf->space >= n)
 			return 0;
 	}
@@ -1549,7 +1675,7 @@
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;
 
-	ringbuf->space = ring_space(ring);
+	ringbuf->space = ring_space(ringbuf);
 	return 0;
 }
 
@@ -1578,7 +1704,7 @@
 	trace_i915_ring_wait_begin(ring);
 	do {
 		ringbuf->head = I915_READ_HEAD(ring);
-		ringbuf->space = ring_space(ring);
+		ringbuf->space = ring_space(ringbuf);
 		if (ringbuf->space >= n) {
 			ret = 0;
 			break;
@@ -1630,7 +1756,7 @@
 		iowrite32(MI_NOOP, virt++);
 
 	ringbuf->tail = 0;
-	ringbuf->space = ring_space(ring);
+	ringbuf->space = ring_space(ringbuf);
 
 	return 0;
 }
@@ -1746,14 +1872,15 @@
 
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	BUG_ON(ring->outstanding_lazy_seqno);
 
-	if (INTEL_INFO(ring->dev)->gen >= 6) {
+	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
 		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
-		if (HAS_VEBOX(ring->dev))
+		if (HAS_VEBOX(dev))
 			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
 	}
 
@@ -1941,45 +2068,74 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_object *obj;
+	int ret;
 
 	ring->name = "render ring";
 	ring->id = RCS;
 	ring->mmio_base = RENDER_RING_BASE;
 
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (INTEL_INFO(dev)->gen >= 8) {
+		if (i915_semaphore_is_enabled(dev)) {
+			obj = i915_gem_alloc_object(dev, 4096);
+			if (obj == NULL) {
+				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+				i915.semaphores = 0;
+			} else {
+				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+				if (ret != 0) {
+					drm_gem_object_unreference(&obj->base);
+					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+					i915.semaphores = 0;
+				} else
+					dev_priv->semaphore_obj = obj;
+			}
+		}
+		ring->add_request = gen6_add_request;
+		ring->flush = gen8_render_ring_flush;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (i915_semaphore_is_enabled(dev)) {
+			WARN_ON(!dev_priv->semaphore_obj);
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_rcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		if (INTEL_INFO(dev)->gen >= 8) {
-			ring->flush = gen8_render_ring_flush;
-			ring->irq_get = gen8_ring_get_irq;
-			ring->irq_put = gen8_ring_put_irq;
-		} else {
-			ring->irq_get = gen6_ring_get_irq;
-			ring->irq_put = gen6_ring_put_irq;
-		}
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->semaphore.sync_to = gen6_ring_sync;
-		ring->semaphore.signal = gen6_signal;
-		/*
-		 * The current semaphore is only applied on pre-gen8 platform.
-		 * And there is no VCS2 ring on the pre-gen8 platform. So the
-		 * semaphore between RCS and VCS2 is initialized as INVALID.
-		 * Gen8 will initialize the sema between VCS2 and RCS later.
-		 */
-		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-		ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-		ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-		ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen6_ring_sync;
+			ring->semaphore.signal = gen6_signal;
+			/*
+			 * The current semaphore is only applied on pre-gen8
+			 * platform.  And there is no VCS2 ring on the pre-gen8
+			 * platform. So the semaphore between RCS and VCS2 is
+			 * initialized as INVALID.  Gen8 will initialize the
+			 * sema between VCS2 and RCS later.
+			 */
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
@@ -2007,6 +2163,7 @@
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
+
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
 	else if (IS_GEN8(dev))
@@ -2024,9 +2181,6 @@
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
-		struct drm_i915_gem_object *obj;
-		int ret;
-
 		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate batch bo\n");
@@ -2157,31 +2311,32 @@
 			ring->irq_put = gen8_ring_put_irq;
 			ring->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
+			if (i915_semaphore_is_enabled(dev)) {
+				ring->semaphore.sync_to = gen8_ring_sync;
+				ring->semaphore.signal = gen8_xcs_signal;
+				GEN8_RING_SEMAPHORE_INIT;
+			}
 		} else {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen6_ring_get_irq;
 			ring->irq_put = gen6_ring_put_irq;
 			ring->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
+			if (i915_semaphore_is_enabled(dev)) {
+				ring->semaphore.sync_to = gen6_ring_sync;
+				ring->semaphore.signal = gen6_signal;
+				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			}
 		}
-		ring->semaphore.sync_to = gen6_ring_sync;
-		ring->semaphore.signal = gen6_signal;
-		/*
-		 * The current semaphore is only applied on pre-gen8 platform.
-		 * And there is no VCS2 ring on the pre-gen8 platform. So the
-		 * semaphore between VCS and VCS2 is initialized as INVALID.
-		 * Gen8 will initialize the sema between VCS2 and VCS later.
-		 */
-		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-		ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-		ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-		ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	} else {
 		ring->mmio_base = BSD_RING_BASE;
 		ring->flush = bsd_ring_flush;
@@ -2218,7 +2373,7 @@
 		return -EINVAL;
 	}
 
-	ring->name = "bds2_ring";
+	ring->name = "bsd2 ring";
 	ring->id = VCS2;
 
 	ring->write_tail = ring_write_tail;
@@ -2233,25 +2388,11 @@
 	ring->irq_put = gen8_ring_put_irq;
 	ring->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	/*
-	 * The current semaphore is only applied on the pre-gen8. And there
-	 * is no bsd2 ring on the pre-gen8. So now the semaphore_register
-	 * between VCS2 and other ring is initialized as invalid.
-	 * Gen8 will initialize the sema between VCS2 and other ring later.
-	 */
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
+	if (i915_semaphore_is_enabled(dev)) {
+		ring->semaphore.sync_to = gen8_ring_sync;
+		ring->semaphore.signal = gen8_xcs_signal;
+		GEN8_RING_SEMAPHORE_INIT;
+	}
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -2277,30 +2418,38 @@
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
 	} else {
 		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.signal = gen6_signal;
+			ring->semaphore.sync_to = gen6_ring_sync;
+			/*
+			 * The current semaphore is only applied on pre-gen8
+			 * platform.  And there is no VCS2 ring on the pre-gen8
+			 * platform. So the semaphore between BCS and VCS2 is
+			 * initialized as INVALID.  Gen8 will initialize the
+			 * sema between BCS and VCS2 later.
+			 */
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	}
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	/*
-	 * The current semaphore is only applied on pre-gen8 platform. And
-	 * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
-	 * between BCS and VCS2 is initialized as INVALID.
-	 * Gen8 will initialize the sema between BCS and VCS2 later.
-	 */
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -2327,24 +2476,31 @@
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
 	} else {
 		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 		ring->irq_get = hsw_vebox_get_irq;
 		ring->irq_put = hsw_vebox_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen6_ring_sync;
+			ring->semaphore.signal = gen6_signal;
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	}
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e72017b..ed59410 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -40,6 +40,32 @@
 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
 #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
+/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
+ * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
+ */
+#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
+	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) +	\
+	(i915_semaphore_seqno_size * (to)))
+
+#define GEN8_WAIT_OFFSET(__ring, from)			     \
+	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+	(i915_semaphore_seqno_size * (__ring)->id))
+
+#define GEN8_RING_SEMAPHORE_INIT do { \
+	if (!dev_priv->semaphore_obj) { \
+		break; \
+	} \
+	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
+	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
+	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
+	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
+	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
+	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+	} while (0)
+
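A worked example of the two offset macros, assuming I915_NUM_RINGS == 5 and ring ids RCS=0, VCS=1 (matching the table further down):

/* GEN8_SIGNAL_OFFSET(RCS, VCS) = base + (0 * 5 * 8) + (8 * 1) = base + 0x08
 * GEN8_WAIT_OFFSET(VCS, RCS)   = base + (0 * 5 * 8) + (8 * 1) = base + 0x08
 * i.e. the signaller writes exactly the slot the waiter polls -- the 0x08
 * cell in both of the signal/wait tables documented below.
 */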
 enum intel_ring_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
@@ -127,15 +153,55 @@
 #define I915_DISPATCH_PINNED 0x2
 	void		(*cleanup)(struct intel_engine_cs *ring);
 
+	/* GEN8 signal/wait table - never trust comments!
+	 *	  signal to	signal to    signal to   signal to      signal to
+	 *	    RCS		   VCS          BCS        VECS		 VCS2
+	 *      --------------------------------------------------------------------
+	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
+	 *	|-------------------------------------------------------------------
+	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
+	 *	|-------------------------------------------------------------------
+	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
+	 *	|-------------------------------------------------------------------
+	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
+	 *	|-------------------------------------------------------------------
+	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
+	 *	|-------------------------------------------------------------------
+	 *
+	 * Generalization:
+	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
+	 *  i.e. transpose of g(x, y)
+	 *
+	 *	 sync from	sync from    sync from    sync from	sync from
+	 *	    RCS		   VCS          BCS        VECS		 VCS2
+	 *      --------------------------------------------------------------------
+	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
+	 *	|-------------------------------------------------------------------
+	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
+	 *	|-------------------------------------------------------------------
+	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
+	 *	|-------------------------------------------------------------------
+	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
+	 *	|-------------------------------------------------------------------
+	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
+	 *	|-------------------------------------------------------------------
+	 *
+	 * Generalization:
+	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
+	 *  i.e. transpose of f(x, y)
+	 */
 	struct {
 		u32	sync_seqno[I915_NUM_RINGS-1];
 
-		struct {
-			/* our mbox written by others */
-			u32		wait[I915_NUM_RINGS];
-			/* mboxes this ring signals to */
-			u32		signal[I915_NUM_RINGS];
-		} mbox;
+		union {
+			struct {
+				/* our mbox written by others */
+				u32		wait[I915_NUM_RINGS];
+				/* mboxes this ring signals to */
+				u32		signal[I915_NUM_RINGS];
+			} mbox;
+			u64		signal_ggtt[I915_NUM_RINGS];
+		};
 
 		/* AKA wait() */
 		int	(*sync_to)(struct intel_engine_cs *ring,
@@ -238,9 +304,11 @@
 	int idx;
 
 	/*
-	 * cs -> 0 = vcs, 1 = bcs
-	 * vcs -> 0 = bcs, 1 = cs,
-	 * bcs -> 0 = cs, 1 = vcs.
+	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
 	 */
 
 	idx = (other - ring) - 1;
@@ -318,9 +386,9 @@
 u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
 void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
-static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
+static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 {
-	return ring->buffer->tail;
+	return ringbuf->tail;
 }
 
 static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 20375cc..9350edd 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2433,7 +2433,7 @@
 	connector->base.unregister = intel_sdvo_connector_unregister;
 
 	intel_connector_attach_encoder(&connector->base, &encoder->base);
-	ret = drm_sysfs_connector_add(drm_connector);
+	ret = drm_connector_register(drm_connector);
 	if (ret < 0)
 		goto err1;
 
@@ -2446,7 +2446,7 @@
 	return 0;
 
 err2:
-	drm_sysfs_connector_remove(drm_connector);
+	drm_connector_unregister(drm_connector);
 err1:
 	drm_connector_cleanup(drm_connector);
 
@@ -2559,7 +2559,7 @@
 	return true;
 
 err:
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	intel_sdvo_destroy(connector);
 	return false;
 }
@@ -2638,7 +2638,7 @@
 	return true;
 
 err:
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	intel_sdvo_destroy(connector);
 	return false;
 }
@@ -2711,7 +2711,7 @@
 	list_for_each_entry_safe(connector, tmp,
 				 &dev->mode_config.connector_list, head) {
 		if (intel_attached_encoder(connector) == &intel_sdvo->base) {
-			drm_sysfs_connector_remove(connector);
+			drm_connector_unregister(connector);
 			intel_sdvo_destroy(connector);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9a17b4e..168c665 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -218,7 +218,8 @@
 
 	sprctl |= SP_ENABLE;
 
-	intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+	intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
+				       pixel_size, true,
 				       src_w != crtc_w || src_h != crtc_h);
 
 	/* Sizes are 0 based */
@@ -283,7 +284,7 @@
 	if (atomic_update)
 		intel_pipe_update_end(intel_crtc, start_vbl_count);
 
-	intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
+	intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
 }
 
 static int
@@ -406,7 +407,8 @@
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
-	intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+	intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
+				       true,
 				       src_w != crtc_w || src_h != crtc_h);
 
 	/* Sizes are 0 based */
@@ -486,7 +488,7 @@
 	 */
 	intel_wait_for_vblank(dev, pipe);
 
-	intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+	intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
 }
 
 static int
@@ -606,7 +608,8 @@
 		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
 	dvscntr |= DVS_ENABLE;
 
-	intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+	intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
+				       pixel_size, true,
 				       src_w != crtc_w || src_h != crtc_h);
 
 	/* Sizes are 0 based */
@@ -681,7 +684,7 @@
 	 */
 	intel_wait_for_vblank(dev, pipe);
 
-	intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+	intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
 }
 
 static void
@@ -819,6 +822,7 @@
 	struct drm_device *dev = plane->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
+	enum pipe pipe = intel_crtc->pipe;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct drm_i915_gem_object *old_obj = intel_plane->obj;
@@ -1006,6 +1010,8 @@
 	 */
 	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 
+	i915_gem_track_fb(old_obj, obj,
+			  INTEL_FRONTBUFFER_SPRITE(pipe));
 	mutex_unlock(&dev->struct_mutex);
 
 	if (ret)
@@ -1039,6 +1045,8 @@
 		else
 			intel_plane->disable_plane(plane, crtc);
 
+		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
+
 		if (!primary_was_enabled && primary_enabled)
 			intel_post_enable_primary(crtc);
 	}
@@ -1068,6 +1076,7 @@
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct intel_crtc *intel_crtc;
+	enum pipe pipe;
 
 	if (!plane->fb)
 		return 0;
@@ -1076,6 +1085,7 @@
 		return -EINVAL;
 
 	intel_crtc = to_intel_crtc(plane->crtc);
+	pipe = intel_crtc->pipe;
 
 	if (intel_crtc->active) {
 		bool primary_was_enabled = intel_crtc->primary_enabled;
@@ -1094,6 +1104,8 @@
 
 		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(intel_plane->obj);
+		i915_gem_track_fb(intel_plane->obj, NULL,
+				  INTEL_FRONTBUFFER_SPRITE(pipe));
 		mutex_unlock(&dev->struct_mutex);
 
 		intel_plane->obj = NULL;
@@ -1114,7 +1126,6 @@
 			      struct drm_file *file_priv)
 {
 	struct drm_intel_sprite_colorkey *set = data;
-	struct drm_mode_object *obj;
 	struct drm_plane *plane;
 	struct intel_plane *intel_plane;
 	int ret = 0;
@@ -1128,13 +1139,12 @@
 
 	drm_modeset_lock_all(dev);
 
-	obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
-	if (!obj) {
+	plane = drm_plane_find(dev, set->plane_id);
+	if (!plane) {
 		ret = -ENOENT;
 		goto out_unlock;
 	}
 
-	plane = obj_to_plane(obj);
 	intel_plane = to_intel_plane(plane);
 	ret = intel_plane->update_colorkey(plane, set);
 
@@ -1147,7 +1157,6 @@
 			      struct drm_file *file_priv)
 {
 	struct drm_intel_sprite_colorkey *get = data;
-	struct drm_mode_object *obj;
 	struct drm_plane *plane;
 	struct intel_plane *intel_plane;
 	int ret = 0;
@@ -1157,13 +1166,12 @@
 
 	drm_modeset_lock_all(dev);
 
-	obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
-	if (!obj) {
+	plane = drm_plane_find(dev, get->plane_id);
+	if (!plane) {
 		ret = -ENOENT;
 		goto out_unlock;
 	}
 
-	plane = obj_to_plane(obj);
 	intel_plane = to_intel_plane(plane);
 	intel_plane->get_colorkey(plane, get);
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 67c6c9a..e211eef 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1680,5 +1680,5 @@
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_bottom_margin_property,
 				   intel_tv->margin[TV_MARGIN_BOTTOM]);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f6fef7..e81bc3b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,8 +231,8 @@
 	}
 
 	/* WaRsForcewakeWaitTC0:vlv */
-	__gen6_gt_wait_for_thread_c0(dev_priv);
-
+	if (!IS_CHERRYVIEW(dev_priv->dev))
+		__gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -250,9 +250,10 @@
 		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
 				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
-	/* The below doubles as a POSTING_READ */
-	gen6_gt_check_fifodbg(dev_priv);
-
+	/* something from same cacheline, but !FORCEWAKE_VLV */
+	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+	if (!IS_CHERRYVIEW(dev_priv->dev))
+		gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
@@ -315,7 +316,7 @@
 	intel_runtime_pm_put(dev_priv);
 }
 
-static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
@@ -357,16 +358,12 @@
 			dev_priv->uncore.fifo_count =
 				__raw_i915_read32(dev_priv, GTFIFOCTL) &
 				GT_FIFO_FREE_ENTRIES_MASK;
-	} else {
-		dev_priv->uncore.forcewake_count = 0;
-		dev_priv->uncore.fw_rendercount = 0;
-		dev_priv->uncore.fw_mediacount = 0;
 	}
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void intel_uncore_early_sanitize(struct drm_device *dev)
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -389,7 +386,7 @@
 		__raw_i915_write32(dev_priv, GTFIFODBG,
 				   __raw_i915_read32(dev_priv, GTFIFODBG));
 
-	intel_uncore_forcewake_reset(dev, false);
+	intel_uncore_forcewake_reset(dev, restore_forcewake);
 }
 
 void intel_uncore_sanitize(struct drm_device *dev)
@@ -469,16 +466,43 @@
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	 ((reg) < 0x40000 && (reg) != FORCEWAKE)
 
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
-	(((reg) >= 0x2000 && (reg) < 0x4000) ||\
-	((reg) >= 0x5000 && (reg) < 0x8000) ||\
-	((reg) >= 0xB000 && (reg) < 0x12000) ||\
-	((reg) >= 0x2E000 && (reg) < 0x30000))
+#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
 
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
-	(((reg) >= 0x12000 && (reg) < 0x14000) ||\
-	((reg) >= 0x22000 && (reg) < 0x24000) ||\
-	((reg) >= 0x30000 && (reg) < 0x40000))
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x2000, 0x4000) || \
+	 REG_RANGE((reg), 0x5000, 0x8000) || \
+	 REG_RANGE((reg), 0xB000, 0x12000) || \
+	 REG_RANGE((reg), 0x2E000, 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x12000, 0x14000) || \
+	 REG_RANGE((reg), 0x22000, 0x24000) || \
+	 REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x2000, 0x4000) || \
+	 REG_RANGE((reg), 0x5000, 0x8000) || \
+	 REG_RANGE((reg), 0x8300, 0x8500) || \
+	 REG_RANGE((reg), 0xB000, 0xC000) || \
+	 REG_RANGE((reg), 0xE000, 0xE800))
+
+#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x8800, 0x8900) || \
+	 REG_RANGE((reg), 0xD000, 0xD800) || \
+	 REG_RANGE((reg), 0x12000, 0x14000) || \
+	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
+	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
+	 REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
+	(REG_RANGE((reg), 0x4000, 0x5000) || \
+	 REG_RANGE((reg), 0x8000, 0x8300) || \
+	 REG_RANGE((reg), 0x8500, 0x8600) || \
+	 REG_RANGE((reg), 0x9000, 0xB000) || \
+	 REG_RANGE((reg), 0xC000, 0xC800) || \
+	 REG_RANGE((reg), 0xF000, 0x10000) || \
+	 REG_RANGE((reg), 0x14000, 0x14400) || \
+	 REG_RANGE((reg), 0x22000, 0x24000))
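The chv_read/chv_write macros below key off these ranges to pick forcewake wells; distilled into a plain helper (illustrative only -- the real logic stays inside the macros so it can also consult the per-well refcounts):

static unsigned chv_wells_for_reg(u32 reg)
{
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER;
	if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg))
		return FORCEWAKE_MEDIA;
	if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER | FORCEWAKE_MEDIA;	/* both wells */
	return 0;	/* register needs no forcewake */
}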
 
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -490,20 +514,30 @@
 }
 
 static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
+			bool before)
 {
+	const char *op = read ? "reading" : "writing to";
+	const char *when = before ? "before" : "after";
+
+	if (!i915.mmio_debug)
+		return;
+
 	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
-		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
-			  reg);
+		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
+		     when, op, reg);
 		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 	}
 }
 
 static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
 {
+	if (i915.mmio_debug)
+		return;
+
 	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
-		DRM_ERROR("Unclaimed write to %x\n", reg);
+		DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.\n");
 		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 	}
 }
@@ -540,6 +574,7 @@
 static u##x \
 gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 	REG_READ_HEADER(x); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
 	if (dev_priv->uncore.forcewake_count == 0 && \
 	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -550,6 +585,7 @@
 	} else { \
 		val = __raw_i915_read##x(dev_priv, reg); \
 	} \
+	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
 	REG_READ_FOOTER; \
 }
 
@@ -573,7 +609,35 @@
 	REG_READ_FOOTER; \
 }
 
+#define __chv_read(x) \
+static u##x \
+chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	unsigned fwengine = 0; \
+	REG_READ_HEADER(x); \
+	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+		if (dev_priv->uncore.fw_rendercount == 0) \
+			fwengine = FORCEWAKE_RENDER; \
+	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+		if (dev_priv->uncore.fw_mediacount == 0) \
+			fwengine = FORCEWAKE_MEDIA; \
+	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+		if (dev_priv->uncore.fw_rendercount == 0) \
+			fwengine |= FORCEWAKE_RENDER; \
+		if (dev_priv->uncore.fw_mediacount == 0) \
+			fwengine |= FORCEWAKE_MEDIA; \
+	} \
+	if (fwengine) \
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	if (fwengine) \
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+	REG_READ_FOOTER; \
+}
 
+__chv_read(8)
+__chv_read(16)
+__chv_read(32)
+__chv_read(64)
 __vlv_read(8)
 __vlv_read(16)
 __vlv_read(32)
@@ -591,6 +655,7 @@
 __gen4_read(32)
 __gen4_read(64)
 
+#undef __chv_read
 #undef __vlv_read
 #undef __gen6_read
 #undef __gen5_read
@@ -647,12 +712,13 @@
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
-	hsw_unclaimed_reg_clear(dev_priv, reg); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
-	hsw_unclaimed_reg_check(dev_priv, reg); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+	hsw_unclaimed_reg_detect(dev_priv); \
 	REG_WRITE_FOOTER; \
 }
 
@@ -681,6 +747,7 @@
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
 	REG_WRITE_HEADER; \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
 	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
 		if (dev_priv->uncore.forcewake_count == 0) \
 			dev_priv->uncore.funcs.force_wake_get(dev_priv,	\
@@ -692,9 +759,43 @@
 	} else { \
 		__raw_i915_write##x(dev_priv, reg, val); \
 	} \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+	hsw_unclaimed_reg_detect(dev_priv); \
 	REG_WRITE_FOOTER; \
 }
 
+#define __chv_write(x) \
+static void \
+chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	unsigned fwengine = 0; \
+	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+	REG_WRITE_HEADER; \
+	if (!shadowed) { \
+		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+			if (dev_priv->uncore.fw_rendercount == 0) \
+				fwengine = FORCEWAKE_RENDER; \
+		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+			if (dev_priv->uncore.fw_mediacount == 0) \
+				fwengine = FORCEWAKE_MEDIA; \
+		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+			if (dev_priv->uncore.fw_rendercount == 0) \
+				fwengine |= FORCEWAKE_RENDER; \
+			if (dev_priv->uncore.fw_mediacount == 0) \
+				fwengine |= FORCEWAKE_MEDIA; \
+		} \
+	} \
+	if (fwengine) \
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (fwengine) \
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+	REG_WRITE_FOOTER; \
+}
+
+__chv_write(8)
+__chv_write(16)
+__chv_write(32)
+__chv_write(64)
 __gen8_write(8)
 __gen8_write(16)
 __gen8_write(32)
@@ -716,6 +817,7 @@
 __gen4_write(32)
 __gen4_write(64)
 
+#undef __chv_write
 #undef __gen8_write
 #undef __hsw_write
 #undef __gen6_write
@@ -731,7 +833,7 @@
 	setup_timer(&dev_priv->uncore.force_wake_timer,
 		    gen6_force_wake_timer, (unsigned long)dev_priv);
 
-	intel_uncore_early_sanitize(dev);
+	intel_uncore_early_sanitize(dev, false);
 
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -779,14 +881,26 @@
 
 	switch (INTEL_INFO(dev)->gen) {
 	default:
-		dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
-		dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
-		dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
-		dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
-		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
-		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
-		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
-		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+		if (IS_CHERRYVIEW(dev)) {
+			dev_priv->uncore.funcs.mmio_writeb  = chv_write8;
+			dev_priv->uncore.funcs.mmio_writew  = chv_write16;
+			dev_priv->uncore.funcs.mmio_writel  = chv_write32;
+			dev_priv->uncore.funcs.mmio_writeq  = chv_write64;
+			dev_priv->uncore.funcs.mmio_readb  = chv_read8;
+			dev_priv->uncore.funcs.mmio_readw  = chv_read16;
+			dev_priv->uncore.funcs.mmio_readl  = chv_read32;
+			dev_priv->uncore.funcs.mmio_readq  = chv_read64;
+
+		} else {
+			dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
+			dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
+			dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
+			dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
+			dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+			dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+			dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+			dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+		}
 		break;
 	case 7:
 	case 6:
@@ -912,7 +1026,7 @@
 	if (args->flags || args->pad)
 		return -EINVAL;
 
-	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1053,18 +1167,16 @@
 
 int intel_gpu_reset(struct drm_device *dev)
 {
-	switch (INTEL_INFO(dev)->gen) {
-	case 8:
-	case 7:
-	case 6: return gen6_do_reset(dev);
-	case 5: return ironlake_do_reset(dev);
-	case 4:
-		if (IS_G4X(dev))
-			return g4x_do_reset(dev);
-		else
-			return i965_do_reset(dev);
-	default: return -ENODEV;
-	}
+	if (INTEL_INFO(dev)->gen >= 6)
+		return gen6_do_reset(dev);
+	else if (IS_GEN5(dev))
+		return ironlake_do_reset(dev);
+	else if (IS_G4X(dev))
+		return g4x_do_reset(dev);
+	else if (IS_GEN4(dev))
+		return i965_do_reset(dev);
+	else
+		return -ENODEV;
 }
 
 void intel_uncore_check_errors(struct drm_device *dev)
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index cf11ee6..80de23d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -280,7 +280,7 @@
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
 	if (ret) {
 		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 13b7dd8..5451dc5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -272,7 +272,7 @@
 	return 0;
 }
 
-static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
 	.gamma_set = mga_crtc_fb_gamma_set,
 	.gamma_get = mga_crtc_fb_gamma_get,
 	.fb_probe = mgag200fb_create,
@@ -293,9 +293,10 @@
 		return -ENOMEM;
 
 	mdev->mfbdev = mfbdev;
-	mfbdev->helper.funcs = &mga_fb_helper_funcs;
 	spin_lock_init(&mfbdev->dirty_lock);
 
+	drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
+
 	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
 				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
 	if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a034ed4..45f04de 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1562,19 +1562,9 @@
 						  *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj =
-		    drm_mode_object_find(connector->dev, enc_id,
-					 DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
@@ -1621,7 +1611,7 @@
 
 	drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	mga_connector->i2c = mgag200_i2c_create(dev);
 	if (!mga_connector->i2c)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 28f7e3e..76960fa 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -306,7 +306,7 @@
 
 	hdp_disable(hdmi_connector);
 
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 
 	hdmi_unreference(hdmi_connector->hdmi);
@@ -416,7 +416,7 @@
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	ret = hpd_enable(hdmi_connector);
 	if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9a5d87d..a322029 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -905,12 +905,41 @@
 {
 	return dev->of_node == data;
 }
-
-static int msm_drm_add_components(struct device *master, struct master *m)
+#else
+static int compare_dev(struct device *dev, void *data)
 {
-	struct device_node *np = master->of_node;
+	return dev == data;
+}
+#endif
+
+static int msm_drm_bind(struct device *dev)
+{
+	return drm_platform_init(&msm_driver, to_platform_device(dev));
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+	.bind = msm_drm_bind,
+	.unbind = msm_drm_unbind,
+};
+
+/*
+ * Platform driver:
+ */
+
+static int msm_pdev_probe(struct platform_device *pdev)
+{
+	struct component_match *match = NULL;
+#ifdef CONFIG_OF
+	/* NOTE: the CONFIG_OF case duplicates much the same code as exynos
+	 * and imx (and probably other drivers), so there is likely room for
+	 * some shared helpers.
+	 */
+	struct device_node *np = pdev->dev.of_node;
 	unsigned i;
-	int ret;
 
 	for (i = 0; ; i++) {
 		struct device_node *node;
@@ -919,22 +948,9 @@
 		if (!node)
 			break;
 
-		ret = component_master_add_child(m, compare_of, node);
-		of_node_put(node);
-
-		if (ret)
-			return ret;
+		component_match_add(&pdev->dev, &match, compare_of, node);
 	}
-	return 0;
-}
 #else
-static int compare_dev(struct device *dev, void *data)
-{
-	return dev == data;
-}
-
-static int msm_drm_add_components(struct device *master, struct master *m)
-{
 	/* For non-DT case, it kinda sucks.  We don't actually have a way
 	 * to know whether or not we are waiting for certain devices (or if
 	 * they are simply not present).  But for non-DT we only need to
@@ -958,41 +974,12 @@
 			return -EPROBE_DEFER;
 		}
 
-		ret = component_master_add_child(m, compare_dev, dev);
-		if (ret) {
-			DBG("could not add child: %d", ret);
-			return ret;
-		}
+		component_match_add(&pdev->dev, &match, compare_dev, dev);
 	}
-
-	return 0;
-}
 #endif
 
-static int msm_drm_bind(struct device *dev)
-{
-	return drm_platform_init(&msm_driver, to_platform_device(dev));
-}
-
-static void msm_drm_unbind(struct device *dev)
-{
-	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
-}
-
-static const struct component_master_ops msm_drm_ops = {
-		.add_components = msm_drm_add_components,
-		.bind = msm_drm_bind,
-		.unbind = msm_drm_unbind,
-};
-
-/*
- * Platform driver:
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
-{
 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	return component_master_add(&pdev->dev, &msm_drm_ops);
+	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
 }
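The componentised probe boils down to the following pattern (a minimal sketch with hypothetical names; the real code above builds the match list from DT nodes or bus devices):

static int example_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;

	/* one entry per sub-device the master must wait for */
	component_match_add(&pdev->dev, &match, compare_dev, child_dev);

	/* bind()/unbind() run once every matched component has probed */
	return component_master_add_with_match(&pdev->dev, &example_ops, match);
}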
 
 static int msm_pdev_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 5107fc4..c437065 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -177,7 +177,7 @@
 	DBG("fbdev: get gamma");
 }
 
-static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
 	.gamma_set = msm_crtc_fb_gamma_set,
 	.gamma_get = msm_crtc_fb_gamma_get,
 	.fb_probe = msm_fbdev_create,
@@ -197,7 +197,7 @@
 
 	helper = &fbdev->base;
 
-	helper->funcs = &msm_fb_helper_funcs;
+	drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(dev, helper,
 			priv->num_crtcs, priv->num_connectors);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 690d7e7..713722b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@
 		int npages = obj->size >> PAGE_SHIFT;
 
 		if (iommu_present(&platform_bus_type))
-			p = drm_gem_get_pages(obj, 0);
+			p = drm_gem_get_pages(obj);
 		else
 			p = get_pages_vram(obj, npages);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b6dc85c..ba29a70 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -309,7 +309,7 @@
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	int ret;
 
-	ret = ttm_bo_reserve(bo, false, false, false, 0);
+	ret = ttm_bo_reserve(bo, false, false, false, NULL);
 	if (ret)
 		goto out;
 
@@ -350,7 +350,7 @@
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	int ret, ref;
 
-	ret = ttm_bo_reserve(bo, false, false, false, 0);
+	ret = ttm_bo_reserve(bo, false, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -385,7 +385,7 @@
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1fa222e..dbdc9ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -63,7 +63,7 @@
 {
 	struct drm_device *dev = connector->dev;
 	struct nouveau_encoder *nv_encoder;
-	struct drm_mode_object *obj;
+	struct drm_encoder *enc;
 	int i, id;
 
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
@@ -71,10 +71,10 @@
 		if (!id)
 			break;
 
-		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		enc = drm_encoder_find(dev, id);
+		if (!enc)
 			continue;
-		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+		nv_encoder = nouveau_encoder(enc);
 
 		if (type == DCB_OUTPUT_ANY ||
 		    (nv_encoder->dcb && nv_encoder->dcb->type == type))
@@ -104,7 +104,7 @@
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	nouveau_event_ref(NULL, &nv_connector->hpd);
 	kfree(nv_connector->edid);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	if (nv_connector->aux.transfer)
 		drm_dp_aux_unregister(&nv_connector->aux);
@@ -119,7 +119,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct nouveau_encoder *nv_encoder;
-	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
 	int i, panel = -ENODEV;
 
 	/* eDP panels need powering on by us (if the VBIOS doesn't default it
@@ -139,10 +139,10 @@
 		if (id == 0)
 			break;
 
-		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(dev, id);
+		if (!encoder)
 			continue;
-		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+		nv_encoder = nouveau_encoder(encoder);
 
 		if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 			int ret = nouveau_dp_detect(nv_encoder);
@@ -1236,6 +1236,6 @@
 
 	INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	return connector;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 191665e..758c11c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -438,7 +438,7 @@
 	info->flags |= FBINFO_HWACCEL_DISABLED;
 }
 
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
 	.gamma_set = nouveau_fbcon_gamma_set,
 	.gamma_get = nouveau_fbcon_gamma_get,
 	.fb_probe = nouveau_fbcon_create,
@@ -464,7 +464,8 @@
 
 	fbcon->dev = dev;
 	drm->fbcon = fbcon;
-	fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
 	ret = drm_fb_helper_init(dev, &fbcon->helper,
 				 dev->mode_config.num_crtc, 4);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c90c0dc..df9d451 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,7 +61,7 @@
 	if (!cli->base.vm)
 		return 0;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -132,7 +132,7 @@
 	if (!cli->base.vm)
 		return;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 	if (ret)
 		return;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index ab0228f..7e185c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -76,6 +76,7 @@
 nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 struct ttm_placement *placement,
+			 uint32_t flags,
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
@@ -162,6 +163,7 @@
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 struct ttm_placement *placement,
+			 uint32_t flags,
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -242,6 +244,7 @@
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 		      struct ttm_buffer_object *bo,
 		      struct ttm_placement *placement,
+		      uint32_t flags,
 		      struct ttm_mem_reg *mem)
 {
 	struct nouveau_mem *node;
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 86f4ead..36bc5cc 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -130,7 +130,7 @@
 	struct omap_dss_device *dssdev = omap_connector->dssdev;
 
 	DBG("%s", omap_connector->dssdev->name);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(omap_connector);
 
@@ -307,7 +307,7 @@
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	return connector;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f926b4c..56c6055 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -199,7 +199,7 @@
 static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
 		struct page **pages, uint32_t npages, uint32_t roll)
 {
-	dma_addr_t pat_pa = 0;
+	dma_addr_t pat_pa = 0, data_pa = 0;
 	uint32_t *data;
 	struct pat *pat;
 	struct refill_engine *engine = txn->engine_handle;
@@ -223,7 +223,9 @@
 			.lut_id = engine->tcm->lut_id,
 		};
 
-	data = alloc_dma(txn, 4*i, &pat->data_pa);
+	data = alloc_dma(txn, 4*i, &data_pa);
+	/* FIXME: what if data_pa is wider than 32 bits? */
+	pat->data_pa = data_pa;
 
 	while (i--) {
 		int n = i + roll;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 284b80f..b08a450 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -119,13 +119,6 @@
 	struct omap_drm_irq error_handler;
 };
 
-/* this should probably be in drm-core to standardize amongst drivers */
-#define DRM_ROTATE_0	0
-#define DRM_ROTATE_90	1
-#define DRM_ROTATE_180	2
-#define DRM_ROTATE_270	3
-#define DRM_REFLECT_X	4
-#define DRM_REFLECT_Y	5
 
 #ifdef CONFIG_DEBUG_FS
 int omap_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 1388ca7..8436c685 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -281,7 +281,7 @@
 	return ret;
 }
 
-static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
 	.fb_probe = omap_fbdev_create,
 };
 
@@ -325,7 +325,7 @@
 
 	helper = &fbdev->base;
 
-	helper->funcs = &omap_fb_helper_funcs;
+	drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(dev, helper,
 			priv->num_crtcs, priv->num_connectors);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 95dbce2..e484941 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,11 +233,7 @@
 
 	WARN_ON(omap_obj->pages);
 
-	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
-	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
-	 * we actually want CMA memory for it all anyways..
-	 */
-	pages = drm_gem_get_pages(obj, GFP_KERNEL);
+	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 		return PTR_ERR(pages);
@@ -791,7 +787,7 @@
 			omap_obj->paddr = tiler_ssptr(block);
 			omap_obj->block = block;
 
-			DBG("got paddr: %08x", omap_obj->paddr);
+			DBG("got paddr: %pad", &omap_obj->paddr);
 		}
 
 		omap_obj->paddr_cnt++;
@@ -985,9 +981,9 @@
 
 	off = drm_vma_node_start(&obj->vma_node);
 
-	seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
 			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
-			off, omap_obj->paddr, omap_obj->paddr_cnt,
+			off, &omap_obj->paddr, omap_obj->paddr_cnt,
 			omap_obj->vaddr, omap_obj->roll);
 
 	if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1183,9 +1179,7 @@
 			}
 		}
 		spin_unlock(&sync_lock);
-
-		if (waiter)
-			kfree(waiter);
+		kfree(waiter);
 	}
 	return ret;
 }
@@ -1347,6 +1341,7 @@
 	struct omap_drm_private *priv = dev->dev_private;
 	struct omap_gem_object *omap_obj;
 	struct drm_gem_object *obj = NULL;
+	struct address_space *mapping;
 	size_t size;
 	int ret;
 
@@ -1404,14 +1399,16 @@
 		omap_obj->height = gsize.tiled.height;
 	}
 
-	ret = 0;
-	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
+	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
 		drm_gem_private_object_init(dev, obj, size);
-	else
+	} else {
 		ret = drm_gem_object_init(dev, obj, size);
+		if (ret)
+			goto fail;
 
-	if (ret)
-		goto fail;
+		mapping = file_inode(obj->filp)->i_mapping;
+		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
+	}
 
 	return obj;
 
@@ -1467,8 +1464,8 @@
 			entry->paddr = tiler_ssptr(block);
 			entry->block = block;
 
-			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
-					entry->paddr,
+			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
+					&entry->paddr,
 					usergart[i].stride_pfn << PAGE_SHIFT);
 		}
 	}
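drm_gem_get_pages() no longer takes a gfp mask (see the hunk above); the mask is instead set once on the object's address_space, and every later shmem allocation for the object inherits it. A condensed sketch of the new pattern for a shmem-backed object whose pages must stay 32-bit DMA addressable (example_gem_init is illustrative):

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <drm/drmP.h>

	static int example_gem_init(struct drm_device *dev,
				    struct drm_gem_object *obj, size_t size)
	{
		struct address_space *mapping;
		int ret;

		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			return ret;

		/* Every page later fetched through drm_gem_get_pages()
		 * honours this mask, keeping allocations below 4 GiB. */
		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);

		return 0;
	}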
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 3cf31ee..891a4dc 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -142,8 +142,8 @@
 	DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
 			info->out_width, info->out_height,
 			info->screen_width);
-	DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
-			info->paddr, info->p_uv_addr);
+	DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
+			&info->paddr, &info->p_uv_addr);
 
 	/* TODO: */
 	ilace = false;
@@ -308,16 +308,13 @@
 	if (priv->has_dmm) {
 		prop = priv->rotation_prop;
 		if (!prop) {
-			const struct drm_prop_enum_list props[] = {
-					{ DRM_ROTATE_0,   "rotate-0" },
-					{ DRM_ROTATE_90,  "rotate-90" },
-					{ DRM_ROTATE_180, "rotate-180" },
-					{ DRM_ROTATE_270, "rotate-270" },
-					{ DRM_REFLECT_X,  "reflect-x" },
-					{ DRM_REFLECT_Y,  "reflect-y" },
-			};
-			prop = drm_property_create_bitmask(dev, 0, "rotation",
-					props, ARRAY_SIZE(props));
+			prop = drm_mode_create_rotation_property(dev,
+								 BIT(DRM_ROTATE_0) |
+								 BIT(DRM_ROTATE_90) |
+								 BIT(DRM_ROTATE_180) |
+								 BIT(DRM_ROTATE_270) |
+								 BIT(DRM_REFLECT_X) |
+								 BIT(DRM_REFLECT_Y));
 			if (prop == NULL)
 				return;
 			priv->rotation_prop = prop;
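drm_mode_create_rotation_property() replaces the open-coded per-driver bitmask property: the driver passes the set of rotations and reflections it actually supports and gets back the standard "rotation" property. A sketch of attaching it to a plane (example_attach_rotation is illustrative):

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	static int example_attach_rotation(struct drm_device *dev,
					   struct drm_plane *plane)
	{
		struct drm_property *prop;

		/* Advertise only what the hardware can really do. */
		prop = drm_mode_create_rotation_property(dev,
							 BIT(DRM_ROTATE_0) |
							 BIT(DRM_ROTATE_180));
		if (!prop)
			return -ENOMEM;

		drm_object_attach_property(&plane->base, prop,
					   BIT(DRM_ROTATE_0));
		return 0;
	}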
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5d7ea24..b8ced08 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -835,7 +835,7 @@
 	struct qxl_output *qxl_output =
 		drm_connector_to_qxl_output(connector);
 
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(qxl_output);
 }
@@ -902,7 +902,7 @@
 
 	drm_object_attach_property(&connector->base,
 				   qdev->hotplug_mode_update_property, 0);
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index f437b30..df56788 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -660,7 +660,7 @@
 	return 0;
 }
 
-static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
 	.fb_probe = qxl_fb_find_or_create_single,
 };
 
@@ -676,9 +676,12 @@
 
 	qfbdev->qdev = qdev;
 	qdev->mode_info.qfbdev = qfbdev;
-	qfbdev->helper.funcs = &qxl_fb_helper_funcs;
 	spin_lock_init(&qfbdev->delayed_ops_lock);
 	INIT_LIST_HEAD(&qfbdev->delayed_ops);
+
+	drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
+			      &qxl_fb_helper_funcs);
+
 	ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
 				 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
 				 QXLFB_CONN_LIMIT);
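The qxl, radeon, udl and tegra hunks in this merge all adopt the same two-step fbdev bring-up: drm_fb_helper_prepare() wires up the (now const) funcs table, then drm_fb_helper_init() allocates the helper state. Condensed, with example_fb_probe standing in for a driver's .fb_probe hook and the error handling mirroring the tegra hunk below:

	#include <drm/drmP.h>
	#include <drm/drm_fb_helper.h>

	static int example_fb_probe(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
	{
		return -ENOSYS;	/* a real driver allocates its fb here */
	}

	static const struct drm_fb_helper_funcs example_fb_helper_funcs = {
		.fb_probe = example_fb_probe,
	};

	static int example_fbdev_init(struct drm_device *dev,
				      struct drm_fb_helper *helper,
				      int num_crtc, int max_conn)
	{
		int ret;

		drm_fb_helper_prepare(dev, helper, &example_fb_helper_funcs);

		ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn);
		if (ret < 0)
			return ret;

		ret = drm_fb_helper_single_add_all_connectors(helper);
		if (ret < 0)
			goto err_fini;

		ret = drm_fb_helper_initial_config(helper, 32);
		if (ret < 0)
			goto err_fini;

		return 0;

	err_fini:
		drm_fb_helper_fini(helper);
		return ret;
	}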
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index d458a14..83a4232 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -31,7 +31,7 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
 			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -67,7 +67,7 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
 			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 1544efc..ebdce08 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1401,7 +1401,6 @@
  */
 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 {
-	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
 	struct radeon_cs_packet p3reloc, waitreloc;
@@ -1441,12 +1440,11 @@
 	header = radeon_get_ib_value(p, h_idx);
 	crtc_id = radeon_get_ib_value(p, h_idx + 5);
 	reg = R100_CP_PACKET0_GET_REG(header);
-	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
-	if (!obj) {
+	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+	if (!crtc) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
 		return -ENOENT;
 	}
-	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
 	crtc_id = radeon_crtc->crtc_id;
 
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 12511bb..c47537a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -825,7 +825,6 @@
 			       uint32_t *vline_start_end,
 			       uint32_t *vline_status)
 {
-	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
 	struct radeon_cs_packet p3reloc, wait_reg_mem;
@@ -887,12 +886,11 @@
 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
 	reg = R600_CP_PACKET0_GET_REG(header);
 
-	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
-	if (!obj) {
+	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+	if (!crtc) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
 		return -ENOENT;
 	}
-	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
 	crtc_id = radeon_crtc->crtc_id;
 
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4483119..c667c43 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -216,7 +216,6 @@
 	struct drm_encoder *best_encoder = NULL;
 	struct drm_encoder *encoder = NULL;
 	struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
-	struct drm_mode_object *obj;
 	bool connected;
 	int i;
 
@@ -226,14 +225,11 @@
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev,
-					   connector->encoder_ids[i],
-					   DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev,
+					   connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
-
 		if ((encoder == best_encoder) && (status == connector_status_connected))
 			connected = true;
 		else
@@ -249,7 +245,6 @@
 
 static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
 {
-	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	int i;
 
@@ -257,11 +252,10 @@
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
 		if (encoder->encoder_type == encoder_type)
 			return encoder;
 	}
@@ -271,17 +265,9 @@
 static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
@@ -740,7 +726,7 @@
 	if (radeon_connector->edid)
 		kfree(radeon_connector->edid);
 	kfree(radeon_connector->con_priv);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -1048,7 +1034,6 @@
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder = NULL;
 	struct drm_encoder_helper_funcs *encoder_funcs;
-	struct drm_mode_object *obj;
 	int i, r;
 	enum drm_connector_status ret = connector_status_disconnected;
 	bool dret = false, broken_edid = false;
@@ -1153,14 +1138,11 @@
 			if (connector->encoder_ids[i] == 0)
 				break;
 
-			obj = drm_mode_object_find(connector->dev,
-						   connector->encoder_ids[i],
-						   DRM_MODE_OBJECT_ENCODER);
-			if (!obj)
+			encoder = drm_encoder_find(connector->dev,
+						   connector->encoder_ids[i]);
+			if (!encoder)
 				continue;
 
-			encoder = obj_to_encoder(obj);
-
 			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
 			    encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
 				continue;
@@ -1225,19 +1207,16 @@
 {
 	int enc_id = connector->encoder_ids[0];
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	int i;
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
-
 		if (radeon_connector->use_digital == true) {
 			if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
 				return encoder;
@@ -1252,13 +1231,8 @@
 
 	/* then check use digitial */
 	/* pick the first one */
-	if (enc_id) {
-		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
@@ -1391,7 +1365,6 @@
 
 u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
 {
-	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	struct radeon_encoder *radeon_encoder;
 	int i;
@@ -1400,11 +1373,10 @@
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
 		radeon_encoder = to_radeon_encoder(encoder);
 
 		switch (radeon_encoder->encoder_id) {
@@ -1421,7 +1393,6 @@
 
 bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
 {
-	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	struct radeon_encoder *radeon_encoder;
 	int i;
@@ -1431,11 +1402,10 @@
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
 		radeon_encoder = to_radeon_encoder(encoder);
 		if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
 			found = true;
@@ -2050,7 +2020,7 @@
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	connector->display_info.subpixel_order = subpixel_order;
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	if (has_aux)
 		radeon_dp_aux_init(radeon_connector);
@@ -2211,5 +2181,5 @@
 	} else
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
 	connector->display_info.subpixel_order = subpixel_order;
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 }
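Every radeon hunk above collapses the same three-step lookup (drm_mode_object_find(), NULL check, obj_to_encoder()) into one call. drm_encoder_find(), like the drm_crtc_find() used in r100.c and r600_cs.c, is a thin inline wrapper, roughly:

	static inline struct drm_encoder *
	example_encoder_find(struct drm_device *dev, uint32_t id)
	{
		struct drm_mode_object *mo;

		mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
		return mo ? obj_to_encoder(mo) : NULL;
	}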
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 665ced3..db598d7 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -331,7 +331,7 @@
 	return 0;
 }
 
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
 	.gamma_set = radeon_crtc_fb_gamma_set,
 	.gamma_get = radeon_crtc_fb_gamma_get,
 	.fb_probe = radeonfb_create,
@@ -353,7 +353,9 @@
 
 	rfbdev->rdev = rdev;
 	rdev->mode_info.rfbdev = rfbdev;
-	rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+	drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
+			      &radeon_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
 				 rdev->num_crtc,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 792fd1d..fda64b7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -187,7 +187,7 @@
  * Power management
  */
 
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
 static int rcar_du_pm_suspend(struct device *dev)
 {
 	struct rcar_du_device *rcdu = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index a87edfac..7602610 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -135,7 +135,9 @@
 {
 	struct rcar_du_device *rcdu = dev->dev_private;
 	const struct rcar_du_format_info *format;
+	unsigned int max_pitch;
 	unsigned int align;
+	unsigned int bpp;
 
 	format = rcar_du_format_info(mode_cmd->pixel_format);
 	if (format == NULL) {
@@ -144,13 +146,20 @@
 		return ERR_PTR(-EINVAL);
 	}
 
+	/*
+	 * The pitch and alignment constraints are expressed in pixels on the
+	 * hardware side and in bytes in the DRM API.
+	 */
+	bpp = format->planes == 2 ? 1 : format->bpp / 8;
+	max_pitch = 4096 * bpp;
+
 	if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
 		align = 128;
 	else
-		align = 16 * format->bpp / 8;
+		align = 16 * bpp;
 
 	if (mode_cmd->pitches[0] & (align - 1) ||
-	    mode_cmd->pitches[0] >= 8192) {
+	    mode_cmd->pitches[0] >= max_pitch) {
 		dev_dbg(dev->dev, "invalid pitch value %u\n",
 			mode_cmd->pitches[0]);
 		return ERR_PTR(-EINVAL);
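To make the new limit concrete: mode_cmd->pitches[] is in bytes while the hardware constraint is 4096 pixels, so the fixed 8192 was wrong in both directions. For XRGB8888 (4 bytes per pixel) the correct byte limit is 4096 * 4 = 16384, and the old check rejected valid framebuffers wider than 2048 pixels; for NV12 (bpp factor 1, per the two-plane rule in the comment above) the correct limit is 4096 bytes, and the old check let through pitches the hardware cannot scan out.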
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 289048d..21426bd 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -64,7 +64,7 @@
 
 static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 }
 
@@ -105,7 +105,7 @@
 		return ret;
 
 	drm_connector_helper_add(connector, &connector_helper_funcs);
-	ret = drm_sysfs_connector_add(connector);
+	ret = drm_connector_register(connector);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index ccfe64c..8af3944 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -32,7 +32,7 @@
 
 static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 }
 
@@ -70,7 +70,7 @@
 		return ret;
 
 	drm_connector_helper_add(connector, &connector_helper_funcs);
-	ret = drm_sysfs_connector_add(connector);
+	ret = drm_connector_register(connector);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index faf176b..47875de 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -692,7 +692,7 @@
 	struct shmob_drm_connector *scon = to_shmob_connector(connector);
 
 	shmob_drm_backlight_exit(scon);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 }
 
@@ -726,7 +726,7 @@
 		return ret;
 
 	drm_connector_helper_add(connector, &connector_helper_funcs);
-	ret = drm_sysfs_connector_add(connector);
+	ret = drm_connector_register(connector);
 	if (ret < 0)
 		goto err_cleanup;
 
@@ -749,7 +749,7 @@
 err_backlight:
 	shmob_drm_backlight_exit(&sdev->connector);
 err_sysfs:
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 err_cleanup:
 	drm_connector_cleanup(connector);
 	return ret;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 82c84c7..ff4ba48 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -297,7 +297,7 @@
  * Power management
  */
 
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
 static int shmob_drm_pm_suspend(struct device *dev)
 {
 	struct shmob_drm_device *sdev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3396f9f..fd736ef 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -40,6 +40,12 @@
 
 	drm_mode_config_init(drm);
 
+	err = tegra_drm_fb_prepare(drm);
+	if (err < 0)
+		return err;
+
+	drm_kms_helper_poll_init(drm);
+
 	err = host1x_device_init(device);
 	if (err < 0)
 		return err;
@@ -59,8 +65,6 @@
 	if (err < 0)
 		return err;
 
-	drm_kms_helper_poll_init(drm);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 6b8fe9d..0d30689 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -280,6 +280,7 @@
 				    unsigned int index);
 bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
 bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
+int tegra_drm_fb_prepare(struct drm_device *drm);
 int tegra_drm_fb_init(struct drm_device *drm);
 void tegra_drm_fb_exit(struct drm_device *drm);
 #ifdef CONFIG_DRM_TEGRA_FBDEV
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 9798a70..fc1528e 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -267,18 +267,13 @@
 	return err;
 }
 
-static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
 	.fb_probe = tegra_fbdev_probe,
 };
 
-static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
-					      unsigned int preferred_bpp,
-					      unsigned int num_crtc,
-					      unsigned int max_connectors)
+static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
 {
-	struct drm_fb_helper *helper;
 	struct tegra_fbdev *fbdev;
-	int err;
 
 	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
 	if (!fbdev) {
@@ -286,13 +281,23 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	fbdev->base.funcs = &tegra_fb_helper_funcs;
-	helper = &fbdev->base;
+	drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs);
+
+	return fbdev;
+}
+
+static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
+			    unsigned int preferred_bpp,
+			    unsigned int num_crtc,
+			    unsigned int max_connectors)
+{
+	struct drm_device *drm = fbdev->base.dev;
+	int err;
 
 	err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
 	if (err < 0) {
 		dev_err(drm->dev, "failed to initialize DRM FB helper\n");
-		goto free;
+		return err;
 	}
 
 	err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
@@ -301,21 +306,17 @@
 		goto fini;
 	}
 
-	drm_helper_disable_unused_functions(drm);
-
 	err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
 	if (err < 0) {
 		dev_err(drm->dev, "failed to set initial configuration\n");
 		goto fini;
 	}
 
-	return fbdev;
+	return 0;
 
 fini:
 	drm_fb_helper_fini(&fbdev->base);
-free:
-	kfree(fbdev);
-	return ERR_PTR(err);
+	return err;
 }
 
 static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
@@ -366,7 +367,7 @@
 #endif
 };
 
-int tegra_drm_fb_init(struct drm_device *drm)
+int tegra_drm_fb_prepare(struct drm_device *drm)
 {
 #ifdef CONFIG_DRM_TEGRA_FBDEV
 	struct tegra_drm *tegra = drm->dev_private;
@@ -381,8 +382,7 @@
 	drm->mode_config.funcs = &tegra_drm_mode_funcs;
 
 #ifdef CONFIG_DRM_TEGRA_FBDEV
-	tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
-					  drm->mode_config.num_connector);
+	tegra->fbdev = tegra_fbdev_create(drm);
 	if (IS_ERR(tegra->fbdev))
 		return PTR_ERR(tegra->fbdev);
 #endif
@@ -390,6 +390,21 @@
 	return 0;
 }
 
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+	struct tegra_drm *tegra = drm->dev_private;
+	int err;
+
+	err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc,
+			       drm->mode_config.num_connector);
+	if (err < 0)
+		return err;
+#endif
+
+	return 0;
+}
+
 void tegra_drm_fb_exit(struct drm_device *drm)
 {
 #ifdef CONFIG_DRM_TEGRA_FBDEV
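The split lets fbdev allocation straddle host1x initialization: tegra_drm_fb_prepare() only allocates the helper, which must exist before the subdevices register their CRTCs and connectors, while tegra_drm_fb_init() runs the helper once all outputs are present. The resulting load order, condensed from the drm.c hunk above:

	drm_mode_config_init(drm);

	err = tegra_drm_fb_prepare(drm);   /* allocate the fbdev helper */
	if (err < 0)
		return err;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);  /* subdevices add CRTCs/connectors */
	if (err < 0)
		return err;

	/* ... vblank setup elided ... */

	err = tegra_drm_fb_init(drm);      /* scan connectors, initial config */
	if (err < 0)
		return err;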
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index a3e4f1e..446837e 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -105,7 +105,7 @@
 
 static void tegra_connector_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	drm_connector_clear(connector);
 }
@@ -318,7 +318,7 @@
 	drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
 
 	drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
-	drm_sysfs_connector_add(&output->connector);
+	drm_connector_register(&output->connector);
 
 	output->encoder.possible_crtcs = 0x3;
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b20b694..6be623b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -120,8 +120,8 @@
 static int tilcdc_unload(struct drm_device *dev)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
-	struct tilcdc_module *mod, *cur;
 
+	drm_fbdev_cma_fini(priv->fbdev);
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
 	drm_vblank_cleanup(dev);
@@ -148,11 +148,6 @@
 
 	pm_runtime_disable(dev->dev);
 
-	list_for_each_entry_safe(mod, cur, &module_list, list) {
-		DBG("destroying module: %s", mod->name);
-		mod->funcs->destroy(mod);
-	}
-
 	kfree(priv);
 
 	return 0;
@@ -628,13 +623,13 @@
 static void __exit tilcdc_drm_fini(void)
 {
 	DBG("fini");
-	tilcdc_tfp410_fini();
-	tilcdc_slave_fini();
-	tilcdc_panel_fini();
 	platform_driver_unregister(&tilcdc_platform_driver);
+	tilcdc_panel_fini();
+	tilcdc_slave_fini();
+	tilcdc_tfp410_fini();
 }
 
-late_initcall(tilcdc_drm_init);
+module_init(tilcdc_drm_init);
 module_exit(tilcdc_drm_fini);
 
 MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
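The reordering in tilcdc_drm_fini() is deliberate: platform_driver_unregister() must run first so that each submodule's .remove() callback, which now frees the module state (see the panel/slave/tfp410 hunks below), executes while its device is still bound; only then are the submodule drivers unregistered, in the reverse of their init order. The switch from late_initcall to module_init only affects built-in configurations, where the driver now registers at the normal device initcall level.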
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 0938036..7596c14 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -98,7 +98,6 @@
 struct tilcdc_module_ops {
 	/* create appropriate encoders/connectors: */
 	int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
-	void (*destroy)(struct tilcdc_module *mod);
 #ifdef CONFIG_DEBUG_FS
 	/* create debugfs nodes (can be NULL): */
 	int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 86c6732..4c7aa1d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -151,6 +151,7 @@
 static void panel_connector_destroy(struct drm_connector *connector)
 {
 	struct panel_connector *panel_connector = to_panel_connector(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(panel_connector);
 }
@@ -247,7 +248,7 @@
 	if (ret)
 		goto fail;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	return connector;
 
@@ -281,23 +282,8 @@
 	return 0;
 }
 
-static void panel_destroy(struct tilcdc_module *mod)
-{
-	struct panel_module *panel_mod = to_panel_module(mod);
-
-	if (panel_mod->timings) {
-		display_timings_release(panel_mod->timings);
-		kfree(panel_mod->timings);
-	}
-
-	tilcdc_module_cleanup(mod);
-	kfree(panel_mod->info);
-	kfree(panel_mod);
-}
-
 static const struct tilcdc_module_ops panel_module_ops = {
 		.modeset_init = panel_modeset_init,
-		.destroy = panel_destroy,
 };
 
 /*
@@ -373,6 +359,7 @@
 		return -ENOMEM;
 
 	mod = &panel_mod->base;
+	pdev->dev.platform_data = mod;
 
 	tilcdc_module_init(mod, "panel", &panel_module_ops);
 
@@ -380,17 +367,16 @@
 	if (IS_ERR(pinctrl))
 		dev_warn(&pdev->dev, "pins are not configured\n");
 
-
 	panel_mod->timings = of_get_display_timings(node);
 	if (!panel_mod->timings) {
 		dev_err(&pdev->dev, "could not get panel timings\n");
-		goto fail;
+		goto fail_free;
 	}
 
 	panel_mod->info = of_get_panel_info(node);
 	if (!panel_mod->info) {
 		dev_err(&pdev->dev, "could not get panel info\n");
-		goto fail;
+		goto fail_timings;
 	}
 
 	mod->preferred_bpp = panel_mod->info->bpp;
@@ -401,13 +387,26 @@
 
 	return 0;
 
-fail:
-	panel_destroy(mod);
+fail_timings:
+	display_timings_release(panel_mod->timings);
+
+fail_free:
+	tilcdc_module_cleanup(mod);
+	kfree(panel_mod);
 	return ret;
 }
 
 static int panel_remove(struct platform_device *pdev)
 {
+	struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+	struct panel_module *panel_mod = to_panel_module(mod);
+
+	display_timings_release(panel_mod->timings);
+
+	tilcdc_module_cleanup(mod);
+	kfree(panel_mod->info);
+	kfree(panel_mod);
+
 	return 0;
 }
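With .destroy() gone from tilcdc_module_ops, each probe stashes its module pointer in pdev->dev.platform_data so the matching .remove() can recover and free it. The handshake, reduced to its core (the example_module names are illustrative):

	struct example_module {
		struct tilcdc_module base;
	};
	#define to_example_module(x) container_of(x, struct example_module, base)

	static int example_probe(struct platform_device *pdev)
	{
		struct example_module *example_mod;

		example_mod = kzalloc(sizeof(*example_mod), GFP_KERNEL);
		if (!example_mod)
			return -ENOMEM;

		/* Remember the module so remove() can find it again. */
		pdev->dev.platform_data = &example_mod->base;
		return 0;
	}

	static int example_remove(struct platform_device *pdev)
	{
		struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
		struct example_module *example_mod = to_example_module(mod);

		/* Clean up while the containing allocation is still live. */
		tilcdc_module_cleanup(mod);
		kfree(example_mod);
		return 0;
	}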
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index 595068b..3775fd49 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -166,6 +166,7 @@
 static void slave_connector_destroy(struct drm_connector *connector)
 {
 	struct slave_connector *slave_connector = to_slave_connector(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(slave_connector);
 }
@@ -261,7 +262,7 @@
 	if (ret)
 		goto fail;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	return connector;
 
@@ -295,17 +296,8 @@
 	return 0;
 }
 
-static void slave_destroy(struct tilcdc_module *mod)
-{
-	struct slave_module *slave_mod = to_slave_module(mod);
-
-	tilcdc_module_cleanup(mod);
-	kfree(slave_mod);
-}
-
 static const struct tilcdc_module_ops slave_module_ops = {
 		.modeset_init = slave_modeset_init,
-		.destroy = slave_destroy,
 };
 
 /*
@@ -355,10 +347,13 @@
 	}
 
 	slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
-	if (!slave_mod)
-		return -ENOMEM;
+	if (!slave_mod) {
+		ret = -ENOMEM;
+		goto fail_adapter;
+	}
 
 	mod = &slave_mod->base;
+	pdev->dev.platform_data = mod;
 
 	mod->preferred_bpp = slave_info.bpp;
 
@@ -373,10 +368,20 @@
 	tilcdc_slave_probedefer(false);
 
 	return 0;
+
+fail_adapter:
+	i2c_put_adapter(slavei2c);
+	return ret;
 }
 
 static int slave_remove(struct platform_device *pdev)
 {
+	struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+	struct slave_module *slave_mod = to_slave_module(mod);
+
+	tilcdc_module_cleanup(mod);
+	kfree(slave_mod);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index c38b56b..354c47c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -167,6 +167,7 @@
 static void tfp410_connector_destroy(struct drm_connector *connector)
 {
 	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(tfp410_connector);
 }
@@ -261,7 +262,7 @@
 	if (ret)
 		goto fail;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	return connector;
 
@@ -295,23 +296,8 @@
 	return 0;
 }
 
-static void tfp410_destroy(struct tilcdc_module *mod)
-{
-	struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
-
-	if (tfp410_mod->i2c)
-		i2c_put_adapter(tfp410_mod->i2c);
-
-	if (!IS_ERR_VALUE(tfp410_mod->gpio))
-		gpio_free(tfp410_mod->gpio);
-
-	tilcdc_module_cleanup(mod);
-	kfree(tfp410_mod);
-}
-
 static const struct tilcdc_module_ops tfp410_module_ops = {
 		.modeset_init = tfp410_modeset_init,
-		.destroy = tfp410_destroy,
 };
 
 /*
@@ -341,6 +327,7 @@
 		return -ENOMEM;
 
 	mod = &tfp410_mod->base;
+	pdev->dev.platform_data = mod;
 
 	tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
 
@@ -364,6 +351,7 @@
 	tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
 	if (!tfp410_mod->i2c) {
 		dev_err(&pdev->dev, "could not get i2c\n");
+		of_node_put(i2c_node);
 		goto fail;
 	}
 
@@ -377,19 +365,32 @@
 		ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
 		if (ret) {
 			dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
-			goto fail;
+			goto fail_adapter;
 		}
 	}
 
 	return 0;
 
+fail_adapter:
+	i2c_put_adapter(tfp410_mod->i2c);
+
 fail:
-	tfp410_destroy(mod);
+	tilcdc_module_cleanup(mod);
+	kfree(tfp410_mod);
 	return ret;
 }
 
 static int tfp410_remove(struct platform_device *pdev)
 {
+	struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+	struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+
+	i2c_put_adapter(tfp410_mod->i2c);
+	gpio_free(tfp410_mod->gpio);
+
+	tilcdc_module_cleanup(mod);
+	kfree(tfp410_mod);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ab9f71..3da89d5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = __ttm_bo_reserve(bo, false, true, false, 0);
+	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -514,7 +514,7 @@
 			return ret;
 
 		spin_lock(&glob->lru_lock);
-		ret = __ttm_bo_reserve(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
 		/*
 		 * We raced, and lost, someone else holds the reservation now,
@@ -577,11 +577,11 @@
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = __ttm_bo_reserve(entry, false, true, false, 0);
+		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
 		if (remove_all && ret) {
 			spin_unlock(&glob->lru_lock);
 			ret = __ttm_bo_reserve(entry, false, false,
-					       false, 0);
+					       false, NULL);
 			spin_lock(&glob->lru_lock);
 		}
 
@@ -726,7 +726,7 @@
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = __ttm_bo_reserve(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 		if (!ret)
 			break;
 	}
@@ -784,7 +784,7 @@
 	int ret;
 
 	do {
-		ret = (*man->func->get_node)(man, bo, placement, mem);
+		ret = (*man->func->get_node)(man, bo, placement, 0, mem);
 		if (unlikely(ret != 0))
 			return ret;
 		if (mem->mm_node)
@@ -897,7 +897,8 @@
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = (*man->func->get_node)(man, bo, placement, mem);
+			ret = (*man->func->get_node)(man, bo, placement,
+						     cur_flags, mem);
 			if (unlikely(ret))
 				return ret;
 		}
@@ -937,7 +938,6 @@
 		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
 				~TTM_PL_MASK_MEMTYPE);
 
-
 		if (mem_type == TTM_PL_SYSTEM) {
 			mem->mem_type = mem_type;
 			mem->placement = cur_flags;
@@ -1595,7 +1595,7 @@
 	 * Using ttm_bo_reserve makes sure the lru lists are updated.
 	 */
 
-	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
 	if (unlikely(ret != 0))
 		return ret;
 	spin_lock(&bdev->fence_lock);
@@ -1630,7 +1630,7 @@
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = __ttm_bo_reserve(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 		if (!ret)
 			break;
 	}
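The 0 to NULL conversions in this file (and in the qxl and vmwgfx hunks) are not purely cosmetic: the final parameter of ttm_bo_reserve() and __ttm_bo_reserve() is a struct ww_acquire_ctx pointer in this version of TTM, so NULL is the correct spelling of "no ticket"; a literal 0 relied on implicit integer-to-pointer conversion, which sparse flags.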
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index bd850c9..9e103a48 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -50,6 +50,7 @@
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
 			       struct ttm_placement *placement,
+			       uint32_t flags,
 			       struct ttm_mem_reg *mem)
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -67,7 +68,7 @@
 	if (!node)
 		return -ENOMEM;
 
-	if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+	if (flags & TTM_PL_FLAG_TOPDOWN)
 		aflags = DRM_MM_CREATE_TOP;
 
 	spin_lock(&rman->lock);
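The new flags argument matters because bo->mem.placement describes where the buffer currently sits, not where it is being moved to; callers now pass the placement flags of the memory type being tried, so TTM_PL_FLAG_TOPDOWN applies to the destination. Inside the range manager the flag simply selects top-down allocation from the drm_mm, roughly:

	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;

	/* Allocate from the top of the range when the placement for the
	 * new location requests it (e.g. to keep low address space free
	 * for small, alignment-sensitive allocations). */
	if (flags & TTM_PL_FLAG_TOPDOWN)
		aflags = DRM_MM_CREATE_TOP;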
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1df856f..30e5d90 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -500,7 +500,7 @@
 			pgprot_val(tmp) |= _PAGE_GUARDED;
 	}
 #endif
-#if defined(__ia64__)
+#if defined(__ia64__) || defined(__arm__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index d7f92fe..66fc639 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -35,7 +35,7 @@
 #include <drm/drm_sysfs.h>
 
 static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-atomic_t device_released;
+static atomic_t device_released;
 
 static struct device_type ttm_drm_class_type = {
 	.name = "ttm",
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9..beb8e75 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -790,7 +790,7 @@
 	return 0;
 }
 
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 		char *name)
 {
 	spin_lock_init(&pool->lock);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b44d548..e026a9e 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -105,14 +105,7 @@
 udl_best_single_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
-	obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-	if (!obj)
-		return NULL;
-	encoder = obj_to_encoder(obj);
-	return encoder;
+	return drm_encoder_find(connector->dev, enc_id);
 }
 
 static int udl_connector_set_property(struct drm_connector *connector,
@@ -124,7 +117,7 @@
 
 static void udl_connector_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -154,7 +147,7 @@
 	drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
 	drm_connector_helper_add(connector, &udl_connector_helper_funcs);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
 	drm_object_attach_property(&connector->base,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 3771763..d1da339 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -550,7 +550,7 @@
 	return ret;
 }
 
-static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
 	.fb_probe = udlfb_create,
 };
 
@@ -583,7 +583,8 @@
 		return -ENOMEM;
 
 	udl->fbdev = ufbdev;
-	ufbdev->helper.funcs = &udl_fb_helper_funcs;
+
+	drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(dev, &ufbdev->helper,
 				 1, 1);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c041cd7..8044f5f 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@
 	}
 }
 
-static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+static int udl_gem_get_pages(struct udl_gem_object *obj)
 {
 	struct page **pages;
 
 	if (obj->pages)
 		return 0;
 
-	pages = drm_gem_get_pages(&obj->base, gfpmask);
+	pages = drm_gem_get_pages(&obj->base);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
@@ -147,7 +147,7 @@
 		return 0;
 	}
 		
-	ret = udl_gem_get_pages(obj, GFP_KERNEL);
+	ret = udl_gem_get_pages(obj);
 	if (ret)
 		return ret;
 
@@ -205,7 +205,7 @@
 	}
 	gobj = to_udl_bo(obj);
 
-	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+	ret = udl_gem_get_pages(gobj);
 	if (ret)
 		goto out;
 	ret = drm_gem_create_mmap_offset(obj);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 7094b92..4279567 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -306,10 +306,23 @@
 
 	DRM_DEBUG("\n");
 	ret = udl_modeset_init(dev);
+	if (ret)
+		goto err;
 
 	ret = udl_fbdev_init(dev);
+	if (ret)
+		goto err;
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret)
+		goto err_fb;
+
 	return 0;
+err_fb:
+	udl_fbdev_cleanup(dev);
 err:
+	if (udl->urbs.count)
+		udl_free_urb_list(dev);
 	kfree(udl);
 	DRM_ERROR("%d\n", ret);
 	return ret;
@@ -325,6 +338,8 @@
 {
 	struct udl_device *udl = dev->dev_private;
 
+	drm_vblank_cleanup(dev);
+
 	if (udl->urbs.count)
 		udl_free_urb_list(dev);
 
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index cddc4fc..dc145d3 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -363,6 +363,26 @@
 	kfree(crtc);
 }
 
+static int udl_crtc_page_flip(struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb,
+			      struct drm_pending_vblank_event *event,
+			      uint32_t page_flip_flags)
+{
+	struct udl_framebuffer *ufb = to_udl_fb(fb);
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+
+	udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (event)
+		drm_send_vblank_event(dev, 0, event);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	crtc->primary->fb = fb;
+
+	return 0;
+}
+
 static void udl_crtc_prepare(struct drm_crtc *crtc)
 {
 }
@@ -384,6 +404,7 @@
 static const struct drm_crtc_funcs udl_crtc_funcs = {
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = udl_crtc_destroy,
+	.page_flip = udl_crtc_page_flip,
 };
 
 static int udl_crtc_init(struct drm_device *dev)
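Because UDL scans out over USB and has no real vblank interrupt, the flip completes synchronously: the damage is transferred immediately and any pending event is signalled in the same call under dev->event_lock. The drm_vblank_init(dev, 1) added to udl_main.c below provides the vblank bookkeeping that the event path expects.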
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 458cdf6..ce0ab95 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,7 @@
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
+	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
+	    vmwgfx_cmdbuf_res.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
new file mode 100644
index 0000000..bfeb4b1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -0,0 +1,341 @@
+/**************************************************************************
+ *
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
+
+enum vmw_cmdbuf_res_state {
+	VMW_CMDBUF_RES_COMMITTED,
+	VMW_CMDBUF_RES_ADD,
+	VMW_CMDBUF_RES_DEL
+};
+
+/**
+ * struct vmw_cmdbuf_res - Command buffer managed resource entry.
+ *
+ * @res: Refcounted pointer to a struct vmw_resource.
+ * @hash: Hash entry for the manager hash table.
+ * @head: List head used by either the staging list or the manager list
+ * of committed resources.
+ * @state: Staging state of this resource entry.
+ * @man: Pointer to a resource manager for this entry.
+ */
+struct vmw_cmdbuf_res {
+	struct vmw_resource *res;
+	struct drm_hash_item hash;
+	struct list_head head;
+	enum vmw_cmdbuf_res_state state;
+	struct vmw_cmdbuf_res_manager *man;
+};
+
+/**
+ * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
+ *
+ * @resources: Hash table containing staged and committed command buffer
+ * resources.
+ * @list: List of committed command buffer resources.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @resources and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_cmdbuf_res_manager {
+	struct drm_open_hash resources;
+	struct list_head list;
+	struct vmw_private *dev_priv;
+};
+
+
+/**
+ * vmw_cmdbuf_res_lookup - Look up a command buffer resource
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @res_type: The resource type that, combined with the user key,
+ * identifies the resource.
+ * @user_key: The user key.
+ *
+ * Returns a valid refcounted struct vmw_resource pointer on success,
+ * an error pointer on failure.
+ */
+struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+		      enum vmw_cmdbuf_res_type res_type,
+		      u32 user_key)
+{
+	struct drm_hash_item *hash;
+	int ret;
+	unsigned long key = user_key | (res_type << 24);
+
+	ret = drm_ht_find_item(&man->resources, key, &hash);
+	if (unlikely(ret != 0))
+		return ERR_PTR(ret);
+
+	return vmw_resource_reference
+		(drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+}
+
+/**
+ * vmw_cmdbuf_res_free - Free a command buffer resource.
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @entry: Pointer to a struct vmw_cmdbuf_res.
+ *
+ * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
+ * struct vmw_resource.
+ */
+static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
+				struct vmw_cmdbuf_res *entry)
+{
+	list_del(&entry->head);
+	WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+	vmw_resource_unreference(&entry->res);
+	kfree(entry);
+}
+
+/**
+ * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function commits a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_cmdbuf_res_commit(struct list_head *list)
+{
+	struct vmw_cmdbuf_res *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		list_del(&entry->head);
+		switch (entry->state) {
+		case VMW_CMDBUF_RES_ADD:
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
+			list_add_tail(&entry->head, &entry->man->list);
+			break;
+		case VMW_CMDBUF_RES_DEL:
+			vmw_resource_unreference(&entry->res);
+			kfree(entry);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function reverts a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_cmdbuf_res_revert(struct list_head *list)
+{
+	struct vmw_cmdbuf_res *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		switch (entry->state) {
+		case VMW_CMDBUF_RES_ADD:
+			vmw_cmdbuf_res_free(entry->man, entry);
+			break;
+		case VMW_CMDBUF_RES_DEL:
+			WARN_ON(drm_ht_insert_item(&entry->man->resources,
+						   &entry->hash));
+			list_del(&entry->head);
+			list_add_tail(&entry->head, &entry->man->list);
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @res: Valid (refcount != 0) pointer to a struct vmw_resource.
+ * @list: The staging list.
+ *
+ * This function allocates a struct vmw_cmdbuf_res entry and adds the
+ * resource to the hash table of the manager identified by @man. The
+ * entry is then put on the staging list identified by @list.
+ */
+int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+		       enum vmw_cmdbuf_res_type res_type,
+		       u32 user_key,
+		       struct vmw_resource *res,
+		       struct list_head *list)
+{
+	struct vmw_cmdbuf_res *cres;
+	int ret;
+
+	cres = kzalloc(sizeof(*cres), GFP_KERNEL);
+	if (unlikely(cres == NULL))
+		return -ENOMEM;
+
+	cres->hash.key = user_key | (res_type << 24);
+	ret = drm_ht_insert_item(&man->resources, &cres->hash);
+	if (unlikely(ret != 0)) {
+		kfree(cres);
+		goto out_invalid_key;
+	}
+
+	cres->state = VMW_CMDBUF_RES_ADD;
+	cres->res = vmw_resource_reference(res);
+	cres->man = man;
+	list_add_tail(&cres->head, list);
+
+out_invalid_key:
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @list: The staging list.
+ *
+ * This function looks up the struct vmw_cmdbuf_res entry from the manager
+ * hash table and, if it exists, removes it. Depending on its current staging
+ * state it then either removes the entry from the staging list or adds it
+ * to it with a staging state of removal.
+ */
+int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+			  enum vmw_cmdbuf_res_type res_type,
+			  u32 user_key,
+			  struct list_head *list)
+{
+	struct vmw_cmdbuf_res *entry;
+	struct drm_hash_item *hash;
+	int ret;
+
+	ret = drm_ht_find_item(&man->resources,
+			       user_key | (res_type << 24), &hash);
+	if (unlikely(ret != 0))
+		return -EINVAL;
+
+	entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
+
+	switch (entry->state) {
+	case VMW_CMDBUF_RES_ADD:
+		vmw_cmdbuf_res_free(man, entry);
+		break;
+	case VMW_CMDBUF_RES_COMMITTED:
+		(void) drm_ht_remove_item(&man->resources, &entry->hash);
+		list_del(&entry->head);
+		entry->state = VMW_CMDBUF_RES_DEL;
+		list_add_tail(&entry->head, list);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
+ * manager.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * Allocates and initializes a command buffer managed resource manager. Returns
+ * an error pointer on failure.
+ */
+struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_cmdbuf_res_manager *man;
+	int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (man == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	man->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&man->list);
+	ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+	if (ret == 0)
+		return man;
+
+	kfree(man);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
+ * manager.
+ *
+ * @man: Pointer to the manager to destroy.
+ *
+ * This function destroys a command buffer managed resource manager and
+ * unreferences / frees all command buffer managed resources and entries
+ * associated with it.
+ */
+void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
+{
+	struct vmw_cmdbuf_res *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &man->list, head)
+		vmw_cmdbuf_res_free(man, entry);
+
+	kfree(man);
+}
+
+/**
+ * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
+ * resource manager
+ *
+ * Returns the approximate allocation size of a command buffer managed
+ * resource manager.
+ */
+size_t vmw_cmdbuf_res_man_size(void)
+{
+	static size_t res_man_size;
+
+	if (unlikely(res_man_size == 0))
+		res_man_size =
+			ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
+			ttm_round_pot(sizeof(struct hlist_head) <<
+				      VMW_CMDBUF_RES_MAN_HT_ORDER);
+
+	return res_man_size;
+}
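The manager implements a two-phase protocol: an execbuf call stages additions and removals on a private list while it validates the command stream, then commits them once the fifo contents have been submitted, or reverts them if submission fails. A minimal caller sketch (example_submit stands in for the fifo submission step; the cmdbuf mutex is assumed held, as documented above):

	static int example_submit(void)
	{
		return 0;	/* a real caller submits the fifo here */
	}

	static int example_stage_and_submit(struct vmw_cmdbuf_res_manager *man,
					    u32 user_key,
					    struct vmw_resource *res)
	{
		struct list_head staged;
		int ret;

		INIT_LIST_HEAD(&staged);

		/* Stage an addition; on success the entry sits on @staged. */
		ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
					 user_key, res, &staged);
		if (ret)
			return ret;

		ret = example_submit();
		if (ret) {
			/* Submission failed: undo the staged addition. */
			vmw_cmdbuf_res_revert(&staged);
			return ret;
		}

		/* Fifo contents committed: make the addition visible. */
		vmw_cmdbuf_res_commit(&staged);
		return 0;
	}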
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 8bb26dc..5ac9287 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -33,6 +33,7 @@
 	struct ttm_base_object base;
 	struct vmw_resource res;
 	struct vmw_ctx_binding_state cbs;
+	struct vmw_cmdbuf_res_manager *man;
 };
 
 
@@ -103,7 +104,8 @@
 
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
-
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct {
 		SVGA3dCmdHeader header;
@@ -113,9 +115,9 @@
 
 	if (res->func->destroy == vmw_gb_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
+		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill
-			(&container_of(res, struct vmw_user_context, res)->cbs);
+		(void) vmw_context_binding_state_kill(&uctx->cbs);
 		(void) vmw_gb_context_destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
@@ -152,13 +154,16 @@
 	ret = vmw_resource_init(dev_priv, res, true,
 				res_free, &vmw_gb_context_func);
 	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+	if (unlikely(ret != 0))
+		goto out_err;
 
-	if (unlikely(ret != 0)) {
-		if (res_free)
-			res_free(res);
-		else
-			kfree(res);
-		return ret;
+	if (dev_priv->has_mob) {
+		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
+		if (unlikely(IS_ERR(uctx->man))) {
+			ret = PTR_ERR(uctx->man);
+			uctx->man = NULL;
+			goto out_err;
+		}
 	}
 
 	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
@@ -166,6 +171,13 @@
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
+
+out_err:
+	if (res_free)
+		res_free(res);
+	else
+		kfree(res);
+	return ret;
 }
 
 static int vmw_context_init(struct vmw_private *dev_priv,
@@ -471,7 +483,8 @@
 	 */
 
 	if (unlikely(vmw_user_context_size == 0))
-		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
+		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
@@ -901,3 +914,8 @@
 {
 	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
 }
+
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->man;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 70ddce835..ed1d510 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -61,7 +61,7 @@
 
 	vmw_execbuf_release_pinned_bo(dev_priv);
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
@@ -105,7 +105,7 @@
 	if (pin)
 		vmw_execbuf_release_pinned_bo(dev_priv);
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
@@ -212,7 +212,7 @@
 
 	if (pin)
 		vmw_execbuf_release_pinned_bo(dev_priv);
-	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 246a62b..18b54ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -316,7 +316,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(bo, false, true, false, 0);
+	ret = ttm_bo_reserve(bo, false, true, false, NULL);
 	BUG_ON(ret != 0);
 
 	ret = ttm_bo_kmap(bo, 0, 1, &map);
@@ -946,7 +946,6 @@
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
-	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -966,16 +965,10 @@
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
-	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
-	if (IS_ERR(vmw_fp->shman))
-		goto out_no_shman;
-
 	file_priv->driver_priv = vmw_fp;
 
 	return 0;
 
-out_no_shman:
-	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6b252a8..c181175 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,10 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20140325"
+#define VMWGFX_DRIVER_DATE "20140704"
 #define VMWGFX_DRIVER_MAJOR 2
 #define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_DRIVER_PATCHLEVEL 1
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
@@ -75,14 +75,11 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
-struct vmw_compat_shader_manager;
-
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
 	bool gb_aware;
-	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -124,6 +121,10 @@
 	void (*hw_destroy) (struct vmw_resource *res);
 };
 
+
+/*
+ * Resources that are managed using ioctls.
+ */
 enum vmw_res_type {
 	vmw_res_context,
 	vmw_res_surface,
@@ -132,6 +133,15 @@
 	vmw_res_max
 };
 
+/*
+ * Resources that are managed using command streams.
+ */
+enum vmw_cmdbuf_res_type {
+	vmw_cmdbuf_res_compat_shader
+};
+
+struct vmw_cmdbuf_res_manager;
+
 struct vmw_cursor_snooper {
 	struct drm_crtc *crtc;
 	size_t age;
@@ -341,7 +351,7 @@
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
-	struct list_head staged_shaders;
+	struct list_head staged_cmd_res;
 };
 
 struct vmw_legacy_display;
@@ -974,7 +984,8 @@
 extern void vmw_context_binding_res_list_scrub(struct list_head *head);
 extern int vmw_context_rebind_all(struct vmw_resource *ctx);
 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
-
+extern struct vmw_cmdbuf_res_manager *
+vmw_context_res_man(struct vmw_resource *ctx);
 /*
  * Surface management - vmwgfx_surface.c
  */
@@ -1008,27 +1019,42 @@
 				   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
-extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
-				    SVGA3dShaderType shader_type,
-				    u32 *user_key);
-extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
-				      struct list_head *list);
-extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
-				      struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
-				    u32 user_key,
-				    SVGA3dShaderType shader_type,
-				    struct list_head *list);
-extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
+				 struct vmw_cmdbuf_res_manager *man,
 				 u32 user_key, const void *bytecode,
 				 SVGA3dShaderType shader_type,
 				 size_t size,
-				 struct ttm_object_file *tfile,
 				 struct list_head *list);
-extern struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv);
-extern void
-vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
+				    u32 user_key, SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+			 u32 user_key, SVGA3dShaderType shader_type);
+
+/*
+ * Command buffer managed resources - vmwgfx_cmdbuf_res.c
+ */
+
+extern struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
+extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
+extern size_t vmw_cmdbuf_res_man_size(void);
+extern struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+		      enum vmw_cmdbuf_res_type res_type,
+		      u32 user_key);
+extern void vmw_cmdbuf_res_revert(struct list_head *list);
+extern void vmw_cmdbuf_res_commit(struct list_head *list);
+extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+			      enum vmw_cmdbuf_res_type res_type,
+			      u32 user_key,
+			      struct vmw_resource *res,
+			      struct list_head *list);
+extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+				 enum vmw_cmdbuf_res_type res_type,
+				 u32 user_key,
+				 struct list_head *list);
 
 
 /**
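
The declarations above replace the per-file compat shader manager with a generic
command buffer managed resource manager: actions are staged on a caller-owned
list and only take effect once the command stream has been committed to the
device. A minimal sketch of that add/commit-or-revert pattern, where
example_submit() is a hypothetical stand-in for the real fifo submission:

	static int example_stage_and_submit(struct vmw_cmdbuf_res_manager *man,
					    struct vmw_resource *res, u32 user_key)
	{
		struct list_head staged;
		int ret;

		INIT_LIST_HEAD(&staged);

		ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
					 user_key, res, &staged);
		if (ret)
			return ret;

		ret = example_submit();	/* hypothetical command stream commit */
		if (ret)
			vmw_cmdbuf_res_revert(&staged);	/* submission failed: undo */
		else
			vmw_cmdbuf_res_commit(&staged);	/* make the action visible */
		return ret;
	}
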
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 87df0b3..7bfdaa1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -422,117 +422,71 @@
 	return 0;
 }
 
+
 /**
- * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
+ * vmw_cmd_res_reloc_add - Add a resource to a software context's
+ * relocation- and validation lists.
  *
- * @dev_priv: Pointer to a device private structure.
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
  * @res_type: Resource type.
- * @converter: User-space visisble type specific information.
- * @id: user-space resource id handle.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
+ * @id_loc: Pointer to where the id that needs translation is located.
+ * @res: Valid pointer to a struct vmw_resource.
+ * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
+ * used for this resource is returned here.
  */
-static int
-vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
-			 struct vmw_sw_context *sw_context,
-			 enum vmw_res_type res_type,
-			 const struct vmw_user_resource_conv *converter,
-			 uint32_t id,
-			 uint32_t *id_loc,
-			 struct vmw_resource_val_node **p_val)
+static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 enum vmw_res_type res_type,
+				 uint32_t *id_loc,
+				 struct vmw_resource *res,
+				 struct vmw_resource_val_node **p_val)
 {
-	struct vmw_res_cache_entry *rcache =
-		&sw_context->res_cache[res_type];
-	struct vmw_resource *res;
-	struct vmw_resource_val_node *node;
 	int ret;
+	struct vmw_resource_val_node *node;
 
-	if (id == SVGA3D_INVALID_ID) {
-		if (p_val)
-			*p_val = NULL;
-		if (res_type == vmw_res_context) {
-			DRM_ERROR("Illegal context invalid id.\n");
-			return -EINVAL;
-		}
-		return 0;
-	}
-
-	/*
-	 * Fastpath in case of repeated commands referencing the same
-	 * resource
-	 */
-
-	if (likely(rcache->valid && id == rcache->handle)) {
-		const struct vmw_resource *res = rcache->res;
-
-		rcache->node->first_usage = false;
-		if (p_val)
-			*p_val = rcache->node;
-
-		return vmw_resource_relocation_add
-			(&sw_context->res_relocations, res,
-			 id_loc - sw_context->buf_start);
-	}
-
-	ret = vmw_user_resource_lookup_handle(dev_priv,
-					      sw_context->fp->tfile,
-					      id,
-					      converter,
-					      &res);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could not find or use resource 0x%08x.\n",
-			  (unsigned) id);
-		dump_stack();
-		return ret;
-	}
-
-	rcache->valid = true;
-	rcache->res = res;
-	rcache->handle = id;
-
+	*p_val = NULL;
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
 					  id_loc - sw_context->buf_start);
 	if (unlikely(ret != 0))
-		goto out_no_reloc;
+		goto out_err;
 
 	ret = vmw_resource_val_add(sw_context, res, &node);
 	if (unlikely(ret != 0))
-		goto out_no_reloc;
+		goto out_err;
 
-	rcache->node = node;
-	if (p_val)
-		*p_val = node;
+	if (res_type == vmw_res_context && dev_priv->has_mob &&
+	    node->first_usage) {
 
-	if (dev_priv->has_mob && node->first_usage &&
-	    res_type == vmw_res_context) {
+		/*
+		 * Put contexts first on the list to be able to exit
+		 * list traversal for contexts early.
+		 */
+		list_del(&node->head);
+		list_add(&node->head, &sw_context->resource_list);
+
 		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 		if (unlikely(ret != 0))
-			goto out_no_reloc;
+			goto out_err;
 		node->staged_bindings =
 			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
 		if (node->staged_bindings == NULL) {
 			DRM_ERROR("Failed to allocate context binding "
 				  "information.\n");
-			goto out_no_reloc;
+			ret = -ENOMEM;
+			goto out_err;
 		}
 		INIT_LIST_HEAD(&node->staged_bindings->list);
 	}
 
-	vmw_resource_unreference(&res);
-	return 0;
+	if (p_val)
+		*p_val = node;
 
-out_no_reloc:
-	BUG_ON(sw_context->error_resource != NULL);
-	sw_context->error_resource = res;
-
+out_err:
 	return ret;
 }
 
+
 /**
  * vmw_cmd_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
@@ -554,8 +508,71 @@
 		  uint32_t *id_loc,
 		  struct vmw_resource_val_node **p_val)
 {
-	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
-					converter, *id_loc, id_loc, p_val);
+	struct vmw_res_cache_entry *rcache =
+		&sw_context->res_cache[res_type];
+	struct vmw_resource *res;
+	struct vmw_resource_val_node *node;
+	int ret;
+
+	if (*id_loc == SVGA3D_INVALID_ID) {
+		if (p_val)
+			*p_val = NULL;
+		if (res_type == vmw_res_context) {
+			DRM_ERROR("Illegal context invalid id.\n");
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/*
+	 * Fastpath in case of repeated commands referencing the same
+	 * resource
+	 */
+
+	if (likely(rcache->valid && *id_loc == rcache->handle)) {
+		const struct vmw_resource *res = rcache->res;
+
+		rcache->node->first_usage = false;
+		if (p_val)
+			*p_val = rcache->node;
+
+		return vmw_resource_relocation_add
+			(&sw_context->res_relocations, res,
+			 id_loc - sw_context->buf_start);
+	}
+
+	ret = vmw_user_resource_lookup_handle(dev_priv,
+					      sw_context->fp->tfile,
+					      *id_loc,
+					      converter,
+					      &res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use resource 0x%08x.\n",
+			  (unsigned) *id_loc);
+		dump_stack();
+		return ret;
+	}
+
+	rcache->valid = true;
+	rcache->res = res;
+	rcache->handle = *id_loc;
+
+	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+				    res, &node);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	rcache->node = node;
+	if (p_val)
+		*p_val = node;
+	vmw_resource_unreference(&res);
+	return 0;
+
+out_no_reloc:
+	BUG_ON(sw_context->error_resource != NULL);
+	sw_context->error_resource = res;
+
+	return ret;
 }
 
 /**
@@ -572,8 +589,8 @@
 	int ret;
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
-		if (likely(!val->staged_bindings))
-			continue;
+		if (unlikely(!val->staged_bindings))
+			break;
 
 		ret = vmw_context_rebind_all(val->res);
 		if (unlikely(ret != 0)) {
@@ -1626,13 +1643,14 @@
 	} *cmd;
 	int ret;
 	size_t size;
+	struct vmw_resource_val_node *val;
 
 	cmd = container_of(header, struct vmw_shader_define_cmd,
 			   header);
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				NULL);
+				&val);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1640,11 +1658,11 @@
 		return 0;
 
 	size = cmd->header.size - sizeof(cmd->body);
-	ret = vmw_compat_shader_add(sw_context->fp->shman,
+	ret = vmw_compat_shader_add(dev_priv,
+				    vmw_context_res_man(val->res),
 				    cmd->body.shid, cmd + 1,
 				    cmd->body.type, size,
-				    sw_context->fp->tfile,
-				    &sw_context->staged_shaders);
+				    &sw_context->staged_cmd_res);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1672,23 +1690,24 @@
 		SVGA3dCmdDestroyShader body;
 	} *cmd;
 	int ret;
+	struct vmw_resource_val_node *val;
 
 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
 			   header);
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				NULL);
+				&val);
 	if (unlikely(ret != 0))
 		return ret;
 
 	if (unlikely(!dev_priv->has_mob))
 		return 0;
 
-	ret = vmw_compat_shader_remove(sw_context->fp->shman,
+	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
 				       cmd->body.shid,
 				       cmd->body.type,
-				       &sw_context->staged_shaders);
+				       &sw_context->staged_cmd_res);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1715,7 +1734,9 @@
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetShader body;
 	} *cmd;
-	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
+	struct vmw_ctx_bindinfo bi;
+	struct vmw_resource *res = NULL;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -1727,32 +1748,40 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (dev_priv->has_mob) {
-		struct vmw_ctx_bindinfo bi;
-		struct vmw_resource_val_node *res_node;
-		u32 shid = cmd->body.shid;
+	if (!dev_priv->has_mob)
+		return 0;
 
-		if (shid != SVGA3D_INVALID_ID)
-			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
-							cmd->body.type,
-							&shid);
+	if (cmd->body.shid != SVGA3D_INVALID_ID) {
+		res = vmw_compat_shader_lookup
+			(vmw_context_res_man(ctx_node->res),
+			 cmd->body.shid,
+			 cmd->body.type);
 
-		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
-					       vmw_res_shader,
-					       user_shader_converter,
-					       shid,
-					       &cmd->body.shid, &res_node);
-		if (unlikely(ret != 0))
-			return ret;
-
-		bi.ctx = ctx_node->res;
-		bi.res = res_node ? res_node->res : NULL;
-		bi.bt = vmw_ctx_binding_shader;
-		bi.i1.shader_type = cmd->body.type;
-		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+		if (!IS_ERR(res)) {
+			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
+						    vmw_res_shader,
+						    &cmd->body.shid, res,
+						    &res_node);
+			vmw_resource_unreference(&res);
+			if (unlikely(ret != 0))
+				return ret;
+		}
 	}
 
-	return 0;
+	if (!res_node) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context,
+					vmw_res_shader,
+					user_shader_converter,
+					&cmd->body.shid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	bi.ctx = ctx_node->res;
+	bi.res = res_node ? res_node->res : NULL;
+	bi.bt = vmw_ctx_binding_shader;
+	bi.i1.shader_type = cmd->body.type;
+	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
 }
 
 /**
@@ -2394,6 +2423,8 @@
 	}
 }
 
+
+
 int vmw_execbuf_process(struct drm_file *file_priv,
 			struct vmw_private *dev_priv,
 			void __user *user_commands,
@@ -2453,7 +2484,7 @@
 			goto out_unlock;
 		sw_context->res_ht_initialized = true;
 	}
-	INIT_LIST_HEAD(&sw_context->staged_shaders);
+	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
 
 	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
@@ -2548,8 +2579,7 @@
 	}
 
 	list_splice_init(&sw_context->resource_list, &resource_list);
-	vmw_compat_shaders_commit(sw_context->fp->shman,
-				  &sw_context->staged_shaders);
+	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
@@ -2576,8 +2606,7 @@
 	list_splice_init(&sw_context->resource_list, &resource_list);
 	error_resource = sw_context->error_resource;
 	sw_context->error_resource = NULL;
-	vmw_compat_shaders_revert(sw_context->fp->shman,
-				  &sw_context->staged_shaders);
+	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
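
The refactor above splits the old vmw_cmd_compat_res_check() into a pure lookup
path (vmw_cmd_res_check(), which keeps the one-entry per-type cache) and
vmw_cmd_res_reloc_add(), which only appends relocation and validation entries.
The cache fast path it preserves amounts to the following sketch (example_*
names are illustrative, not part of the patch):

	struct example_res_cache {
		bool valid;
		u32 handle;
		struct vmw_resource *res;
	};

	/* Return the cached resource when the handle matches, NULL on a miss. */
	static struct vmw_resource *
	example_cache_lookup(struct example_res_cache *rc, u32 handle)
	{
		return (rc->valid && rc->handle == handle) ? rc->res : NULL;
	}
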
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b1273e8..26f8bdd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -47,6 +47,7 @@
 static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 				  struct ttm_buffer_object *bo,
 				  struct ttm_placement *placement,
+				  uint32_t flags,
 				  struct ttm_mem_reg *mem)
 {
 	struct vmwgfx_gmrid_man *gman =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8f3edc4..d2bc2b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,7 +75,7 @@
 		vmw_surface_unreference(&du->cursor_surface);
 	if (du->cursor_dmabuf)
 		vmw_dmabuf_unreference(&du->cursor_dmabuf);
-	drm_sysfs_connector_remove(&du->connector);
+	drm_connector_unregister(&du->connector);
 	drm_crtc_cleanup(&du->crtc);
 	drm_encoder_cleanup(&du->encoder);
 	drm_connector_cleanup(&du->connector);
@@ -136,7 +136,7 @@
 	kmap_offset = 0;
 	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("reserve failed\n");
 		return -EINVAL;
@@ -343,7 +343,7 @@
 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 	kmap_num = (64*64*4) >> PAGE_SHIFT;
 
-	ret = ttm_bo_reserve(bo, true, false, false, 0);
+	ret = ttm_bo_reserve(bo, true, false, false, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("reserve failed\n");
 		return;
@@ -1501,7 +1501,6 @@
 {
 	struct drm_vmw_cursor_bypass_arg *arg = data;
 	struct vmw_display_unit *du;
-	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	int ret = 0;
 
@@ -1519,13 +1518,12 @@
 		return 0;
 	}
 
-	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
-	if (!obj) {
+	crtc = drm_crtc_find(dev, arg->crtc_id);
+	if (!crtc) {
 		ret = -ENOENT;
 		goto out;
 	}
 
-	crtc = obj_to_crtc(obj);
 	du = vmw_crtc_to_du(crtc);
 
 	du->hotspot_x = arg->xhot;
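
vmw_kms_cursor_bypass_ioctl() now uses the typed drm_crtc_find() helper instead
of the generic mode-object lookup plus obj_to_crtc() cast; a small sketch of the
replacement:

	/* Sketch: the one-call replacement for the two-step lookup removed above. */
	static struct drm_crtc *example_find_crtc(struct drm_device *dev, u32 id)
	{
		/* previously: drm_mode_object_find() + obj_to_crtc() cast */
		return drm_crtc_find(dev, id);
	}
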
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b2b9bd2..15e185a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -371,7 +371,7 @@
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
 
-	(void) drm_sysfs_connector_add(connector);
+	(void) drm_connector_register(connector);
 
 	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 01d68f0..a432c0d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -127,7 +127,7 @@
 	if (res->backup) {
 		struct ttm_buffer_object *bo = &res->backup->base;
 
-		ttm_bo_reserve(bo, false, false, false, 0);
+		ttm_bo_reserve(bo, false, false, false, NULL);
 		if (!list_empty(&res->mob_head) &&
 		    res->func->unbind != NULL) {
 			struct ttm_validate_buffer val_buf;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index a95d3a0..b295463 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -467,7 +467,7 @@
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
 
-	(void) drm_sysfs_connector_add(connector);
+	(void) drm_connector_register(connector);
 
 	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index c1559eea..8719fb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,8 +29,6 @@
 #include "vmwgfx_resource_priv.h"
 #include "ttm/ttm_placement.h"
 
-#define VMW_COMPAT_SHADER_HT_ORDER 12
-
 struct vmw_shader {
 	struct vmw_resource res;
 	SVGA3dShaderType type;
@@ -42,49 +40,8 @@
 	struct vmw_shader shader;
 };
 
-/**
- * enum vmw_compat_shader_state - Staging state for compat shaders
- */
-enum vmw_compat_shader_state {
-	VMW_COMPAT_COMMITED,
-	VMW_COMPAT_ADD,
-	VMW_COMPAT_DEL
-};
-
-/**
- * struct vmw_compat_shader - Metadata for compat shaders.
- *
- * @handle: The TTM handle of the guest backed shader.
- * @tfile: The struct ttm_object_file the guest backed shader is registered
- * with.
- * @hash: Hash item for lookup.
- * @head: List head for staging lists or the compat shader manager list.
- * @state: Staging state.
- *
- * The structure is protected by the cmdbuf lock.
- */
-struct vmw_compat_shader {
-	u32 handle;
-	struct ttm_object_file *tfile;
-	struct drm_hash_item hash;
-	struct list_head head;
-	enum vmw_compat_shader_state state;
-};
-
-/**
- * struct vmw_compat_shader_manager - Compat shader manager.
- *
- * @shaders: Hash table containing staged and commited compat shaders
- * @list: List of commited shaders.
- * @dev_priv: Pointer to a device private structure.
- *
- * @shaders and @list are protected by the cmdbuf mutex for now.
- */
-struct vmw_compat_shader_manager {
-	struct drm_open_hash shaders;
-	struct list_head list;
-	struct vmw_private *dev_priv;
-};
+static uint64_t vmw_user_shader_size;
+static uint64_t vmw_shader_size;
 
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -98,8 +55,6 @@
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
-static uint64_t vmw_user_shader_size;
-
 static const struct vmw_user_resource_conv user_shader_conv = {
 	.object_type = VMW_RES_SHADER,
 	.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -347,6 +302,16 @@
 			    vmw_user_shader_size);
 }
 
+static void vmw_shader_free(struct vmw_resource *res)
+{
+	struct vmw_shader *shader = vmw_res_to_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	kfree(shader);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_shader_size);
+}
+
 /**
  * This function is called when user space has no more references on the
  * base object. It releases the base-object's reference on the resource object.
@@ -371,13 +336,13 @@
 					 TTM_REF_USAGE);
 }
 
-static int vmw_shader_alloc(struct vmw_private *dev_priv,
-			    struct vmw_dma_buffer *buffer,
-			    size_t shader_size,
-			    size_t offset,
-			    SVGA3dShaderType shader_type,
-			    struct ttm_object_file *tfile,
-			    u32 *handle)
+static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
+				 struct vmw_dma_buffer *buffer,
+				 size_t shader_size,
+				 size_t offset,
+				 SVGA3dShaderType shader_type,
+				 struct ttm_object_file *tfile,
+				 u32 *handle)
 {
 	struct vmw_user_shader *ushader;
 	struct vmw_resource *res, *tmp;
@@ -442,6 +407,56 @@
 }
 
 
+struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+				      struct vmw_dma_buffer *buffer,
+				      size_t shader_size,
+				      size_t offset,
+				      SVGA3dShaderType shader_type)
+{
+	struct vmw_shader *shader;
+	struct vmw_resource *res;
+	int ret;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of shaders anyway.
+	 */
+	if (unlikely(vmw_shader_size == 0))
+		vmw_shader_size =
+			ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_shader_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		goto out_err;
+	}
+
+	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
+	if (unlikely(shader == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_shader_size);
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	res = &shader->res;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+				 offset, shader_type, buffer,
+				 vmw_shader_free);
+
+out_err:
+	return ret ? ERR_PTR(ret) : res;
+}
+
+
 int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
@@ -490,8 +505,8 @@
 	if (unlikely(ret != 0))
 		goto out_bad_arg;
 
-	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
-			       shader_type, tfile, &arg->shader_handle);
+	ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+				    shader_type, tfile, &arg->shader_handle);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
@@ -500,202 +515,83 @@
 }
 
 /**
- * vmw_compat_shader_lookup - Look up a compat shader
+ * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * shader type are within valid bounds.
  *
- * @man: Pointer to the compat shader manager.
- * @shader_type: The shader type, that combined with the user_key identifies
- * the shader.
- * @user_key: On entry, this should be a pointer to the user_key.
- * On successful exit, it will contain the guest-backed shader's TTM handle.
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
  *
- * Returns 0 on success. Non-zero on failure, in which case the value pointed
- * to by @user_key is unmodified.
+ * Returns true if valid, false if not.
  */
-int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
-			     SVGA3dShaderType shader_type,
-			     u32 *user_key)
+static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
 {
-	struct drm_hash_item *hash;
-	int ret;
-	unsigned long key = *user_key | (shader_type << 24);
-
-	ret = drm_ht_find_item(&man->shaders, key, &hash);
-	if (unlikely(ret != 0))
-		return ret;
-
-	*user_key = drm_hash_entry(hash, struct vmw_compat_shader,
-				   hash)->handle;
-
-	return 0;
+	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
 }
 
 /**
- * vmw_compat_shader_free - Free a compat shader.
+ * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
  *
- * @man: Pointer to the compat shader manager.
- * @entry: Pointer to a struct vmw_compat_shader.
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
  *
- * Frees a struct vmw_compat_shder entry and drops its reference to the
- * guest backed shader.
+ * Returns a hash key suitable for a command buffer managed resource
+ * manager hash table.
  */
-static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
-				   struct vmw_compat_shader *entry)
+static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
 {
-	list_del(&entry->head);
-	WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
-	WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
-					  TTM_REF_USAGE));
-	kfree(entry);
-}
-
-/**
- * vmw_compat_shaders_commit - Commit a list of compat shader actions.
- *
- * @man: Pointer to the compat shader manager.
- * @list: Caller's list of compat shader actions.
- *
- * This function commits a list of compat shader additions or removals.
- * It is typically called when the execbuf ioctl call triggering these
- * actions has commited the fifo contents to the device.
- */
-void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
-			       struct list_head *list)
-{
-	struct vmw_compat_shader *entry, *next;
-
-	list_for_each_entry_safe(entry, next, list, head) {
-		list_del(&entry->head);
-		switch (entry->state) {
-		case VMW_COMPAT_ADD:
-			entry->state = VMW_COMPAT_COMMITED;
-			list_add_tail(&entry->head, &man->list);
-			break;
-		case VMW_COMPAT_DEL:
-			ttm_ref_object_base_unref(entry->tfile, entry->handle,
-						  TTM_REF_USAGE);
-			kfree(entry);
-			break;
-		default:
-			BUG();
-			break;
-		}
-	}
-}
-
-/**
- * vmw_compat_shaders_revert - Revert a list of compat shader actions
- *
- * @man: Pointer to the compat shader manager.
- * @list: Caller's list of compat shader actions.
- *
- * This function reverts a list of compat shader additions or removals.
- * It is typically called when the execbuf ioctl call triggering these
- * actions failed for some reason, and the command stream was never
- * submitted.
- */
-void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
-			       struct list_head *list)
-{
-	struct vmw_compat_shader *entry, *next;
-	int ret;
-
-	list_for_each_entry_safe(entry, next, list, head) {
-		switch (entry->state) {
-		case VMW_COMPAT_ADD:
-			vmw_compat_shader_free(man, entry);
-			break;
-		case VMW_COMPAT_DEL:
-			ret = drm_ht_insert_item(&man->shaders, &entry->hash);
-			list_del(&entry->head);
-			list_add_tail(&entry->head, &man->list);
-			entry->state = VMW_COMPAT_COMMITED;
-			break;
-		default:
-			BUG();
-			break;
-		}
-	}
+	return user_key | (shader_type << 20);
 }
 
 /**
  * vmw_compat_shader_remove - Stage a compat shader for removal.
  *
- * @man: Pointer to the compat shader manager
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
  * @user_key: The key that is used to identify the shader. The key is
  * unique to the shader type.
  * @shader_type: Shader type.
- * @list: Caller's list of staged shader actions.
- *
- * This function stages a compat shader for removal and removes the key from
- * the shader manager's hash table. If the shader was previously only staged
- * for addition it is completely removed (But the execbuf code may keep a
- * reference if it was bound to a context between addition and removal). If
- * it was previously commited to the manager, it is staged for removal.
+ * @list: Caller's list of staged command buffer resource actions.
  */
-int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
 			     u32 user_key, SVGA3dShaderType shader_type,
 			     struct list_head *list)
 {
-	struct vmw_compat_shader *entry;
-	struct drm_hash_item *hash;
-	int ret;
-
-	ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
-			       &hash);
-	if (likely(ret != 0))
+	if (!vmw_compat_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
-	entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
-
-	switch (entry->state) {
-	case VMW_COMPAT_ADD:
-		vmw_compat_shader_free(man, entry);
-		break;
-	case VMW_COMPAT_COMMITED:
-		(void) drm_ht_remove_item(&man->shaders, &entry->hash);
-		list_del(&entry->head);
-		entry->state = VMW_COMPAT_DEL;
-		list_add_tail(&entry->head, list);
-		break;
-	default:
-		BUG();
-		break;
-	}
-
-	return 0;
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
+				     vmw_compat_shader_key(user_key,
+							   shader_type),
+				     list);
 }
 
 /**
- * vmw_compat_shader_add - Create a compat shader and add the
- * key to the manager
+ * vmw_compat_shader_add - Create a compat shader and stage it for addition
+ * as a command buffer managed resource.
  *
- * @man: Pointer to the compat shader manager
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
  * @user_key: The key that is used to identify the shader. The key is
  * unique to the shader type.
  * @bytecode: Pointer to the bytecode of the shader.
  * @shader_type: Shader type.
- * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
- * to be created with.
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: Size of the shader bytecode in bytes.
- * @list: Caller's list of staged shader actions.
+ * @list: Caller's list of staged command buffer resource actions.
  *
- * Note that only the key is added to the shader manager's hash table.
- * The shader is not yet added to the shader manager's list of shaders.
  */
-int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+int vmw_compat_shader_add(struct vmw_private *dev_priv,
+			  struct vmw_cmdbuf_res_manager *man,
 			  u32 user_key, const void *bytecode,
 			  SVGA3dShaderType shader_type,
 			  size_t size,
-			  struct ttm_object_file *tfile,
 			  struct list_head *list)
 {
 	struct vmw_dma_buffer *buf;
 	struct ttm_bo_kmap_obj map;
 	bool is_iomem;
-	struct vmw_compat_shader *compat;
-	u32 handle;
 	int ret;
+	struct vmw_resource *res;
 
-	if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+	if (!vmw_compat_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
 	/* Allocate and pin a DMA buffer */
@@ -703,7 +599,7 @@
 	if (unlikely(buf == NULL))
 		return -ENOMEM;
 
-	ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
 			      true, vmw_dmabuf_bo_free);
 	if (unlikely(ret != 0))
 		goto out;
@@ -728,84 +624,40 @@
 	WARN_ON(ret != 0);
 	ttm_bo_unreserve(&buf->base);
 
-	/* Create a guest-backed shader container backed by the dma buffer */
-	ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
-			       tfile, &handle);
-	vmw_dmabuf_unreference(&buf);
+	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
-	if (unlikely(ret != 0))
+	if (IS_ERR(res)) {
+		ret = PTR_ERR(res);
 		goto no_reserve;
+	}
-	/*
-	 * Create a compat shader structure and stage it for insertion
-	 * in the manager
-	 */
-	compat = kzalloc(sizeof(*compat), GFP_KERNEL);
-	if (compat == NULL)
-		goto no_compat;
 
-	compat->hash.key = user_key |  (shader_type << 24);
-	ret = drm_ht_insert_item(&man->shaders, &compat->hash);
-	if (unlikely(ret != 0))
-		goto out_invalid_key;
-
-	compat->state = VMW_COMPAT_ADD;
-	compat->handle = handle;
-	compat->tfile = tfile;
-	list_add_tail(&compat->head, list);
-
-	return 0;
-
-out_invalid_key:
-	kfree(compat);
-no_compat:
-	ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
+				 vmw_compat_shader_key(user_key, shader_type),
+				 res, list);
+	vmw_resource_unreference(&res);
 no_reserve:
+	vmw_dmabuf_unreference(&buf);
 out:
 	return ret;
 }
 
 /**
- * vmw_compat_shader_man_create - Create a compat shader manager
+ * vmw_compat_shader_lookup - Look up a compat shader
  *
- * @dev_priv: Pointer to a device private structure.
+ * @man: Pointer to the command buffer managed resource manager identifying
+ * the shader namespace.
+ * @user_key: The user space id of the shader.
+ * @shader_type: The shader type.
  *
- * Typically done at file open time. If successful returns a pointer to a
- * compat shader manager. Otherwise returns an error pointer.
+ * Returns a refcounted pointer to a struct vmw_resource if the shader was
+ * found. An error pointer otherwise.
  */
-struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+			 u32 user_key,
+			 SVGA3dShaderType shader_type)
 {
-	struct vmw_compat_shader_manager *man;
-	int ret;
+	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+		return ERR_PTR(-EINVAL);
 
-	man = kzalloc(sizeof(*man), GFP_KERNEL);
-	if (man == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	man->dev_priv = dev_priv;
-	INIT_LIST_HEAD(&man->list);
-	ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
-	if (ret == 0)
-		return man;
-
-	kfree(man);
-	return ERR_PTR(ret);
-}
-
-/**
- * vmw_compat_shader_man_destroy - Destroy a compat shader manager
- *
- * @man: Pointer to the shader manager to destroy.
- *
- * Typically done at file close time.
- */
-void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
-{
-	struct vmw_compat_shader *entry, *next;
-
-	mutex_lock(&man->dev_priv->cmdbuf_mutex);
-	list_for_each_entry_safe(entry, next, &man->list, head)
-		vmw_compat_shader_free(man, entry);
-
-	mutex_unlock(&man->dev_priv->cmdbuf_mutex);
-	kfree(man);
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
+				     vmw_compat_shader_key(user_key,
+							   shader_type));
 }
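
With the hash table now owned by the command buffer resource manager, the
compat shader code only has to map (user_key, shader_type) pairs onto unique
keys: the low 20 bits carry the user key and the bits above them the shader
type, with vmw_compat_shader_id_ok() enforcing the bounds. The scheme restated
in isolation (illustrative names):

	static bool example_id_ok(u32 user_key, u32 shader_type)
	{
		return user_key < (1u << 20) && shader_type < 16;
	}

	static u32 example_key(u32 user_key, u32 shader_type)
	{
		return user_key | (shader_type << 20);	/* collision-free per type */
	}
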
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index af02597..d2077f0 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -237,12 +237,10 @@
 		if (conflict->locks & lwants)
 			return conflict;
 
-		/* Ok, now check if he owns the resource we want. We don't need
-		 * to check "decodes" since it should be impossible to own
-		 * own legacy resources you don't decode unless I have a bug
-		 * in this code...
+		/* Ok, now check if it owns the resource we want.  We can
+		 * lock resources that are not decoded, therefore a device
+		 * can own resources it doesn't decode.
 		 */
-		WARN_ON(conflict->owns & ~conflict->decodes);
 		match = lwants & conflict->owns;
 		if (!match)
 			continue;
@@ -254,13 +252,19 @@
 		flags = 0;
 		pci_bits = 0;
 
+		/* If we can't control legacy resources via the bridge, we
+		 * also need to disable normal decoding.
+		 */
 		if (!conflict->bridge_has_one_vga) {
-			vga_irq_set_state(conflict, false);
-			flags |= PCI_VGA_STATE_CHANGE_DECODES;
-			if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+			if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM)
 				pci_bits |= PCI_COMMAND_MEMORY;
-			if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+			if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO)
 				pci_bits |= PCI_COMMAND_IO;
+
+			if (pci_bits) {
+				vga_irq_set_state(conflict, false);
+				flags |= PCI_VGA_STATE_CHANGE_DECODES;
+			}
 		}
 
 		if (change_bridge)
@@ -268,18 +272,19 @@
 
 		pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
 		conflict->owns &= ~match;
-		/* If he also owned non-legacy, that is no longer the case */
-		if (match & VGA_RSRC_LEGACY_MEM)
+
+		/* If we disabled normal decoding, reflect it in owns */
+		if (pci_bits & PCI_COMMAND_MEMORY)
 			conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
-		if (match & VGA_RSRC_LEGACY_IO)
+		if (pci_bits & PCI_COMMAND_IO)
 			conflict->owns &= ~VGA_RSRC_NORMAL_IO;
 	}
 
 enable_them:
 	/* ok dude, we got it, everybody conflicting has been disabled, let's
-	 * enable us. Make sure we don't mark a bit in "owns" that we don't
-	 * also have in "decodes". We can lock resources we don't decode but
-	 * not own them.
+	 * enable us.  Mark any bits in "owns" regardless of whether we
+	 * decoded them.  We can lock resources we don't decode, therefore
+	 * we must track them via "owns".
 	 */
 	flags = 0;
 	pci_bits = 0;
@@ -291,7 +296,7 @@
 		if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
 			pci_bits |= PCI_COMMAND_IO;
 	}
-	if (!!(wants & VGA_RSRC_LEGACY_MASK))
+	if (wants & VGA_RSRC_LEGACY_MASK)
 		flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
 
 	pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
@@ -299,7 +304,7 @@
 	if (!vgadev->bridge_has_one_vga) {
 		vga_irq_set_state(vgadev, true);
 	}
-	vgadev->owns |= (wants & vgadev->decodes);
+	vgadev->owns |= wants;
 lock_them:
 	vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
 	if (rsrc & VGA_RSRC_LEGACY_IO)
@@ -649,7 +654,6 @@
 	old_decodes = vgadev->decodes;
 	decodes_removed = ~new_decodes & old_decodes;
 	decodes_unlocked = vgadev->locks & decodes_removed;
-	vgadev->owns &= ~decodes_removed;
 	vgadev->decodes = new_decodes;
 
 	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
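
The arbiter now derives the PCI command bits to clear from the intersection of
the requested ranges and what the conflicting device actually decodes, since a
device may own (lock) legacy ranges it does not decode. The core of that
computation, restated as a sketch with illustrative parameter names:

	/* Only decoded legacy ranges require turning off PCI decoding. */
	static u16 example_conflict_bits(unsigned int match, unsigned int decodes)
	{
		u16 pci_bits = 0;

		if ((match & decodes) & VGA_RSRC_LEGACY_MEM)
			pci_bits |= PCI_COMMAND_MEMORY;
		if ((match & decodes) & VGA_RSRC_LEGACY_IO)
			pci_bits |= PCI_COMMAND_IO;
		return pci_bits;
	}
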
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index def8280..6f54ff4 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -202,7 +202,7 @@
 
 void imx_drm_connector_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 }
 EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
@@ -293,10 +293,10 @@
 	 * userspace will expect to be able to access DRM at this point.
 	 */
 	list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-		ret = drm_sysfs_connector_add(connector);
+		ret = drm_connector_register(connector);
 		if (ret) {
 			dev_err(drm->dev,
-				"[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n",
+				"[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
 				connector->base.id,
 				connector->name, ret);
 			goto err_unbind;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 8af71a8..d3d9be6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -346,18 +346,6 @@
 	spinlock_t write_lock;
 };
 
-struct drm_freelist {
-	int initialized;	       /**< Freelist in use */
-	atomic_t count;		       /**< Number of free buffers */
-	struct drm_buf *next;	       /**< End pointer */
-
-	wait_queue_head_t waiting;     /**< Processes waiting on free bufs */
-	int low_mark;		       /**< Low water mark */
-	int high_mark;		       /**< High water mark */
-	atomic_t wfh;		       /**< If waiting for high mark */
-	spinlock_t lock;
-};
-
 typedef struct drm_dma_handle {
 	dma_addr_t busaddr;
 	void *vaddr;
@@ -375,7 +363,8 @@
 	int page_order;
 	struct drm_dma_handle **seglist;
 
-	struct drm_freelist freelist;
+	int low_mark;			/**< Low water mark */
+	int high_mark;			/**< High water mark */
 };
 
 /* Event queued up for userspace to read */
@@ -441,23 +430,6 @@
 	struct drm_prime_file_private prime;
 };
 
-/** Wait queue */
-struct drm_queue {
-	atomic_t use_count;		/**< Outstanding uses (+1) */
-	atomic_t finalization;		/**< Finalization in progress */
-	atomic_t block_count;		/**< Count of processes waiting */
-	atomic_t block_read;		/**< Queue blocked for reads */
-	wait_queue_head_t read_queue;	/**< Processes waiting on block_read */
-	atomic_t block_write;		/**< Queue blocked for writes */
-	wait_queue_head_t write_queue;	/**< Processes waiting on block_write */
-	atomic_t total_queued;		/**< Total queued statistic */
-	atomic_t total_flushed;		/**< Total flushes statistic */
-	atomic_t total_locks;		/**< Total locks statistics */
-	enum drm_ctx_flags flags;	/**< Context preserving and 2D-only */
-	struct drm_waitlist waitlist;	/**< Pending buffers */
-	wait_queue_head_t flush_queue;	/**< Processes waiting until flush */
-};
-
 /**
  * Lock data.
  */
@@ -1395,8 +1367,6 @@
 extern void drm_put_dev(struct drm_device *dev);
 extern void drm_unplug_dev(struct drm_device *dev);
 extern unsigned int drm_debug;
-extern unsigned int drm_rnodes;
-extern unsigned int drm_universal_planes;
 
 extern unsigned int drm_vblank_offdelay;
 extern unsigned int drm_timestamp_precision;
@@ -1419,6 +1389,8 @@
 extern int drm_debugfs_remove_files(const struct drm_info_list *files,
 				    int count, struct drm_minor *minor);
 extern int drm_debugfs_cleanup(struct drm_minor *minor);
+extern int drm_debugfs_connector_add(struct drm_connector *connector);
+extern void drm_debugfs_connector_remove(struct drm_connector *connector);
 #else
 static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 				   struct dentry *root)
@@ -1443,6 +1415,15 @@
 {
 	return 0;
 }
+
+static inline int drm_debugfs_connector_add(struct drm_connector *connector)
+{
+	return 0;
+}
+static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
+{
+}
+
 #endif
 
 				/* Info file support */
@@ -1574,7 +1555,7 @@
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
 
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+struct page **drm_gem_get_pages(struct drm_gem_object *obj);
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed);
 
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 251b75e..f1105d0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -41,6 +41,7 @@
 struct drm_object_properties;
 struct drm_file;
 struct drm_clip_rect;
+struct device_node;
 
 #define DRM_MODE_OBJECT_CRTC 0xcccccccc
 #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -75,6 +76,14 @@
 	return (uint64_t)*((uint64_t *)&val);
 }
 
+/* rotation property bits */
+#define DRM_ROTATE_0	0
+#define DRM_ROTATE_90	1
+#define DRM_ROTATE_180	2
+#define DRM_ROTATE_270	3
+#define DRM_REFLECT_X	4
+#define DRM_REFLECT_Y	5
+
 enum drm_connector_force {
 	DRM_FORCE_UNSPECIFIED,
 	DRM_FORCE_OFF,
@@ -314,6 +323,7 @@
  */
 struct drm_crtc {
 	struct drm_device *dev;
+	struct device_node *port;
 	struct list_head head;
 
 	/**
@@ -331,6 +341,10 @@
 	struct drm_plane *primary;
 	struct drm_plane *cursor;
 
+	/* position of cursor plane on crtc */
+	int cursor_x;
+	int cursor_y;
+
 	/* Temporary tracking of the old fb while a modeset is ongoing. Used
 	 * by drm_mode_set_config_internal to implement correct refcounting. */
 	struct drm_framebuffer *old_fb;
@@ -524,6 +538,8 @@
 	struct drm_property_blob *edid_blob_ptr;
 	struct drm_object_properties properties;
 
+	struct drm_property_blob *path_blob_ptr;
+
 	uint8_t polled; /* DRM_CONNECTOR_POLL_* */
 
 	/* requested DPMS state */
@@ -533,6 +549,7 @@
 
 	/* forced on connector */
 	enum drm_connector_force force;
+	bool override_edid;
 	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
 	struct drm_encoder *encoder; /* currently active encoder */
 
@@ -545,6 +562,8 @@
 	int audio_latency[2];
 	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
 	unsigned bad_edid_counter;
+
+	struct dentry *debugfs_entry;
 };
 
 /**
@@ -800,6 +819,7 @@
 	struct list_head property_blob_list;
 	struct drm_property *edid_property;
 	struct drm_property *dpms_property;
+	struct drm_property *path_property;
 	struct drm_property *plane_type_property;
 
 	/* DVI-I properties */
@@ -823,6 +843,7 @@
 
 	/* Optional properties */
 	struct drm_property *scaling_mode_property;
+	struct drm_property *aspect_ratio_property;
 	struct drm_property *dirty_info_property;
 
 	/* dumb ioctl parameters */
@@ -852,7 +873,7 @@
 extern int drm_crtc_init_with_planes(struct drm_device *dev,
 				     struct drm_crtc *crtc,
 				     struct drm_plane *primary,
-				     void *cursor,
+				     struct drm_plane *cursor,
 				     const struct drm_crtc_funcs *funcs);
 extern int drm_crtc_init(struct drm_device *dev,
 			 struct drm_crtc *crtc,
@@ -878,6 +899,8 @@
 			      struct drm_connector *connector,
 			      const struct drm_connector_funcs *funcs,
 			      int connector_type);
+int drm_connector_register(struct drm_connector *connector);
+void drm_connector_unregister(struct drm_connector *connector);
 
 extern void drm_connector_cleanup(struct drm_connector *connector);
 /* helper to unplug all connectors from sysfs for device */
@@ -937,6 +960,7 @@
 extern void drm_fb_release(struct drm_file *file_priv);
 extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
 extern void drm_mode_group_destroy(struct drm_mode_group *group);
+extern void drm_reinit_primary_mode_group(struct drm_device *dev);
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 struct i2c_adapter *adapter);
@@ -946,6 +970,8 @@
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 
+extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
+						char *path);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 						struct edid *edid);
 
@@ -994,7 +1020,8 @@
 struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 					 int flags, const char *name,
 					 const struct drm_prop_enum_list *props,
-					 int num_values);
+					 int num_props,
+					 uint64_t supported_bits);
 struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
 					 const char *name,
 					 uint64_t min, uint64_t max);
@@ -1010,6 +1037,7 @@
 extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
 				     char *formats[]);
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
 
 extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -1100,6 +1128,10 @@
 extern int drm_format_horz_chroma_subsampling(uint32_t format);
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
 extern const char *drm_get_format_name(uint32_t format);
+extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+							      unsigned int supported_rotations);
+extern unsigned int drm_rotation_simplify(unsigned int rotation,
+					  unsigned int supported_rotations);
 
 /* Helpers */
 
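The new DRM_ROTATE_*/DRM_REFLECT_* values are bit numbers rather than masks, so
a driver turns them into the bitmask expected by
drm_mode_create_rotation_property() with BIT(). A hedged usage sketch (the
choice of supported rotations is illustrative):

	/* Sketch: expose 0/180 degree rotation on a driver's plane or CRTC. */
	static struct drm_property *
	example_create_rotation(struct drm_device *dev)
	{
		return drm_mode_create_rotation_property(dev,
							 BIT(DRM_ROTATE_0) |
							 BIT(DRM_ROTATE_180));
	}
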
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
new file mode 100644
index 0000000..9b446ad
--- /dev/null
+++ b/include/drm/drm_dp_mst_helper.h
@@ -0,0 +1,509 @@
+/*
+ * Copyright © 2014 Red Hat.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#ifndef _DRM_DP_MST_HELPER_H_
+#define _DRM_DP_MST_HELPER_H_
+
+#include <linux/types.h>
+#include <drm/drm_dp_helper.h>
+
+struct drm_dp_mst_branch;
+
+/**
+ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
+ * @vcpi: Virtual channel ID.
+ * @pbn: Payload Bandwidth Number for this channel
+ * @aligned_pbn: PBN aligned with slot size
+ * @num_slots: number of slots for this PBN
+ */
+struct drm_dp_vcpi {
+	int vcpi;
+	int pbn;
+	int aligned_pbn;
+	int num_slots;
+};
+
+/**
+ * struct drm_dp_mst_port - MST port
+ * @kref: reference count for this port.
+ * @guid_valid: whether the GUID of the DP 1.2 device on this port has been validated.
+ * @guid: guid for DP 1.2 device on this port.
+ * @port_num: port number
+ * @input: if this port is an input port.
+ * @mcs: message capability status - DP 1.2 spec.
+ * @ddps: DisplayPort Device Plug Status - DP 1.2
+ * @pdt: Peer Device Type
+ * @ldps: Legacy Device Plug Status
+ * @dpcd_rev: DPCD revision of device on this port
+ * @num_sdp_streams: Number of simultaneous streams
+ * @num_sdp_stream_sinks: Number of stream sinks
+ * @available_pbn: Available bandwidth for this port.
+ * @next: link to next port on this branch device
+ * @mstb: branch device attached below this port
+ * @aux: i2c aux transport to talk to device connected to this port.
+ * @parent: branch device parent of this port
+ * @vcpi: Virtual Channel Payload info for this port.
+ * @connector: DRM connector this port is connected to.
+ * @mgr: topology manager this port lives under.
+ *
+ * This structure represents an MST port endpoint on a device somewhere
+ * in the MST topology.
+ */
+struct drm_dp_mst_port {
+	struct kref kref;
+
+	/* if dpcd 1.2 device is on this port - its GUID info */
+	bool guid_valid;
+	u8 guid[16];
+
+	u8 port_num;
+	bool input;
+	bool mcs;
+	bool ddps;
+	u8 pdt;
+	bool ldps;
+	u8 dpcd_rev;
+	u8 num_sdp_streams;
+	u8 num_sdp_stream_sinks;
+	uint16_t available_pbn;
+	struct list_head next;
+	struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
+	struct drm_dp_aux aux; /* i2c bus for this port */
+	struct drm_dp_mst_branch *parent;
+
+	struct drm_dp_vcpi vcpi;
+	struct drm_connector *connector;
+	struct drm_dp_mst_topology_mgr *mgr;
+};
+
+/**
+ * struct drm_dp_mst_branch - MST branch device.
+ * @kref: reference count for this branch device.
+ * @rad: Relative Address to talk to this branch device.
+ * @lct: Link count total to talk to this branch device.
+ * @num_ports: number of ports on the branch.
+ * @msg_slots: one bit per transmitted msg slot.
+ * @ports: linked list of ports on this branch.
+ * @port_parent: pointer to the port parent, NULL if toplevel.
+ * @mgr: topology manager for this branch device.
+ * @tx_slots: transmission slots for this device.
+ * @last_seqno: last sequence number used to talk to this.
+ * @link_address_sent: whether a link address message has been sent to this device yet.
+ *
+ * This structure represents an MST branch device; there is one
+ * primary branch device at the root, along with any others connected
+ * to downstream ports.
+ */
+struct drm_dp_mst_branch {
+	struct kref kref;
+	u8 rad[8];
+	u8 lct;
+	int num_ports;
+
+	int msg_slots;
+	struct list_head ports;
+
+	/* list of tx ops queue for this port */
+	struct drm_dp_mst_port *port_parent;
+	struct drm_dp_mst_topology_mgr *mgr;
+
+	/* slots are protected by mstb->mgr->qlock */
+	struct drm_dp_sideband_msg_tx *tx_slots[2];
+	int last_seqno;
+	bool link_address_sent;
+};
+
+
+/* sideband msg header - not bit struct */
+struct drm_dp_sideband_msg_hdr {
+	u8 lct;
+	u8 lcr;
+	u8 rad[8];
+	bool broadcast;
+	bool path_msg;
+	u8 msg_len;
+	bool somt;
+	bool eomt;
+	bool seqno;
+};
+
+struct drm_dp_nak_reply {
+	u8 guid[16];
+	u8 reason;
+	u8 nak_data;
+};
+
+struct drm_dp_link_address_ack_reply {
+	u8 guid[16];
+	u8 nports;
+	struct drm_dp_link_addr_reply_port {
+		bool input_port;
+		u8 peer_device_type;
+		u8 port_number;
+		bool mcs;
+		bool ddps;
+		bool legacy_device_plug_status;
+		u8 dpcd_revision;
+		u8 peer_guid[16];
+		u8 num_sdp_streams;
+		u8 num_sdp_stream_sinks;
+	} ports[16];
+};
+
+struct drm_dp_remote_dpcd_read_ack_reply {
+	u8 port_number;
+	u8 num_bytes;
+	u8 bytes[255];
+};
+
+struct drm_dp_remote_dpcd_write_ack_reply {
+	u8 port_number;
+};
+
+struct drm_dp_remote_dpcd_write_nak_reply {
+	u8 port_number;
+	u8 reason;
+	u8 bytes_written_before_failure;
+};
+
+struct drm_dp_remote_i2c_read_ack_reply {
+	u8 port_number;
+	u8 num_bytes;
+	u8 bytes[255];
+};
+
+struct drm_dp_remote_i2c_read_nak_reply {
+	u8 port_number;
+	u8 nak_reason;
+	u8 i2c_nak_transaction;
+};
+
+struct drm_dp_remote_i2c_write_ack_reply {
+	u8 port_number;
+};
+
+
+struct drm_dp_sideband_msg_rx {
+	u8 chunk[48];
+	u8 msg[256];
+	u8 curchunk_len;
+	u8 curchunk_idx; /* chunk we are parsing now */
+	u8 curchunk_hdrlen;
+	u8 curlen; /* total length of the msg */
+	bool have_somt;
+	bool have_eomt;
+	struct drm_dp_sideband_msg_hdr initial_hdr;
+};
+
+
+struct drm_dp_allocate_payload {
+	u8 port_number;
+	u8 number_sdp_streams;
+	u8 vcpi;
+	u16 pbn;
+	u8 sdp_stream_sink[8];
+};
+
+struct drm_dp_allocate_payload_ack_reply {
+	u8 port_number;
+	u8 vcpi;
+	u16 allocated_pbn;
+};
+
+struct drm_dp_connection_status_notify {
+	u8 guid[16];
+	u8 port_number;
+	bool legacy_device_plug_status;
+	bool displayport_device_plug_status;
+	bool message_capability_status;
+	bool input_port;
+	u8 peer_device_type;
+};
+
+struct drm_dp_remote_dpcd_read {
+	u8 port_number;
+	u32 dpcd_address;
+	u8 num_bytes;
+};
+
+struct drm_dp_remote_dpcd_write {
+	u8 port_number;
+	u32 dpcd_address;
+	u8 num_bytes;
+	u8 *bytes;
+};
+
+struct drm_dp_remote_i2c_read {
+	u8 num_transactions;
+	u8 port_number;
+	struct {
+		u8 i2c_dev_id;
+		u8 num_bytes;
+		u8 *bytes;
+		u8 no_stop_bit;
+		u8 i2c_transaction_delay;
+	} transactions[4];
+	u8 read_i2c_device_id;
+	u8 num_bytes_read;
+};
+
+struct drm_dp_remote_i2c_write {
+	u8 port_number;
+	u8 write_i2c_device_id;
+	u8 num_bytes;
+	u8 *bytes;
+};
+
+/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
+struct drm_dp_port_number_req {
+	u8 port_number;
+};
+
+struct drm_dp_enum_path_resources_ack_reply {
+	u8 port_number;
+	u16 full_payload_bw_number;
+	u16 avail_payload_bw_number;
+};
+
+/* covers POWER_DOWN_PHY, POWER_UP_PHY */
+struct drm_dp_port_number_rep {
+	u8 port_number;
+};
+
+struct drm_dp_query_payload {
+	u8 port_number;
+	u8 vcpi;
+};
+
+struct drm_dp_resource_status_notify {
+	u8 port_number;
+	u8 guid[16];
+	u16 available_pbn;
+};
+
+struct drm_dp_query_payload_ack_reply {
+	u8 port_number;
+	u8 allocated_pbn;
+};
+
+struct drm_dp_sideband_msg_req_body {
+	u8 req_type;
+	union ack_req {
+		struct drm_dp_connection_status_notify conn_stat;
+		struct drm_dp_port_number_req port_num;
+		struct drm_dp_resource_status_notify resource_stat;
+
+		struct drm_dp_query_payload query_payload;
+		struct drm_dp_allocate_payload allocate_payload;
+
+		struct drm_dp_remote_dpcd_read dpcd_read;
+		struct drm_dp_remote_dpcd_write dpcd_write;
+
+		struct drm_dp_remote_i2c_read i2c_read;
+		struct drm_dp_remote_i2c_write i2c_write;
+	} u;
+};
+
+struct drm_dp_sideband_msg_reply_body {
+	u8 reply_type;
+	u8 req_type;
+	union ack_replies {
+		struct drm_dp_nak_reply nak;
+		struct drm_dp_link_address_ack_reply link_addr;
+		struct drm_dp_port_number_rep port_number;
+
+		struct drm_dp_enum_path_resources_ack_reply path_resources;
+		struct drm_dp_allocate_payload_ack_reply allocate_payload;
+		struct drm_dp_query_payload_ack_reply query_payload;
+
+		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
+		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
+		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
+
+		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
+		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
+		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
+	} u;
+};
+
+/* msg is queued to be put into a slot */
+#define DRM_DP_SIDEBAND_TX_QUEUED 0
+/* msg has started transmitting on a slot - still on msgq */
+#define DRM_DP_SIDEBAND_TX_START_SEND 1
+/* msg has finished transmitting on a slot - removed from msgq only in slot */
+#define DRM_DP_SIDEBAND_TX_SENT 2
+/* msg has received a response - removed from slot */
+#define DRM_DP_SIDEBAND_TX_RX 3
+#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
+
+struct drm_dp_sideband_msg_tx {
+	u8 msg[256];
+	u8 chunk[48];
+	u8 cur_offset;
+	u8 cur_len;
+	struct drm_dp_mst_branch *dst;
+	struct list_head next;
+	int seqno;
+	int state;
+	bool path_msg;
+	struct drm_dp_sideband_msg_reply_body reply;
+};
+
+/* sideband msg handler */
+struct drm_dp_mst_topology_mgr;
+struct drm_dp_mst_topology_cbs {
+	/* create a connector for a port */
+	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
+	void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
+				  struct drm_connector *connector);
+	void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
+
+};
+
+#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
+
+#define DP_PAYLOAD_LOCAL 1
+#define DP_PAYLOAD_REMOTE 2
+#define DP_PAYLOAD_DELETE_LOCAL 3
+
+struct drm_dp_payload {
+	int payload_state;
+	int start_slot;
+	int num_slots;
+};
+
+/**
+ * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
+ * @dev: device pointer for adding i2c devices etc.
+ * @cbs: callbacks for connector addition and destruction.
+ * @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go.
+ * @aux: aux channel for the DP connector.
+ * @max_payloads: maximum number of payloads the GPU can generate.
+ * @conn_base_id: DRM connector ID this mgr is connected to.
+ * @down_rep_recv: msg receiver state for down replies.
+ * @up_req_recv: msg receiver state for up requests.
+ * @lock: protects mst state, primary, guid, dpcd.
+ * @mst_state: if this manager is enabled for an MST capable port.
+ * @mst_primary: pointer to the primary branch device.
+ * @guid_valid: GUID valid for the primary branch device.
+ * @guid: GUID for primary port.
+ * @dpcd: cache of DPCD for primary port.
+ * @pbn_div: PBN to slots divisor.
+ *
+ * This struct represents the toplevel displayport MST topology manager.
+ * There should be one instance of this for every MST capable DP connector
+ * on the GPU.
+ */
+struct drm_dp_mst_topology_mgr {
+
+	struct device *dev;
+	struct drm_dp_mst_topology_cbs *cbs;
+	int max_dpcd_transaction_bytes;
+	struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
+	int max_payloads;
+	int conn_base_id;
+
+	/* only ever accessed from the workqueue - which should be serialised */
+	struct drm_dp_sideband_msg_rx down_rep_recv;
+	struct drm_dp_sideband_msg_rx up_req_recv;
+
+	/* pointer to info about the initial MST device */
+	struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+
+	bool mst_state;
+	struct drm_dp_mst_branch *mst_primary;
+	/* primary MST device GUID */
+	bool guid_valid;
+	u8 guid[16];
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 sink_count;
+	int pbn_div;
+	int total_slots;
+	int avail_slots;
+	int total_pbn;
+
+	/* messages to be transmitted */
+	/*
+	 * qlock protects the upq/downq and in_progress,
+	 * the mstb tx_slots and txmsg->state once they are queued.
+	 */
+	struct mutex qlock;
+	struct list_head tx_msg_downq;
+	struct list_head tx_msg_upq;
+	bool tx_down_in_progress;
+	bool tx_up_in_progress;
+
+	/* payload info + lock for it */
+	struct mutex payload_lock;
+	struct drm_dp_vcpi **proposed_vcpis;
+	struct drm_dp_payload *payloads;
+	unsigned long payload_mask;
+
+	wait_queue_head_t tx_waitq;
+	struct work_struct work;
+
+	struct work_struct tx_work;
+};
+
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
+
+void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
+
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
+
+int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
+
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+int drm_dp_calc_pbn_mode(int clock, int bpp);
+
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
+
+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+				struct drm_dp_mst_port *port);
+
+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
+			   int pbn);
+
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
+
+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
+
+int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_dump_topology(struct seq_file *m,
+			      struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+#endif
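Taken together, the entry points above suggest a driver flow along these lines;
a sketch assuming an i915-style DP connector (intel_dp, esi, port, local
declarations and error handling are elided or illustrative; example_mst_cbs is
the callback table sketched earlier):

	/* connector init */
	mgr->cbs = &example_mst_cbs;
	drm_dp_mst_topology_mgr_init(mgr, dev->dev, &intel_dp->aux, 16, 3,
				     connector->base.id);

	/* long HPD: the sink's DPCD advertises MST, enable the manager */
	drm_dp_mst_topology_mgr_set_mst(mgr, true);

	/* short HPD: hand the ESI vector over and let the mgr update state */
	drm_dp_mst_hpd_irq(mgr, esi, &handled);

	/* modeset: mode -> PBN -> VCPI -> payload table */
	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
	drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots);
	drm_dp_update_payload_part1(mgr);	/* before the stream is enabled */
	drm_dp_check_act_status(mgr);		/* wait for the ACT handled bit */
	drm_dp_update_payload_part2(mgr);	/* then the payload sideband msgs */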
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 7997246..bfd329d 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -86,8 +86,9 @@
 	int crtc_count;
 	struct drm_fb_helper_crtc *crtc_info;
 	int connector_count;
+	int connector_info_alloc_count;
 	struct drm_fb_helper_connector **connector_info;
-	struct drm_fb_helper_funcs *funcs;
+	const struct drm_fb_helper_funcs *funcs;
 	struct fb_info *fbdev;
 	u32 pseudo_palette[17];
 	struct list_head kernel_fb_list;
@@ -97,6 +98,8 @@
 	bool delayed_hotplug;
 };
 
+void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+			   const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
 		       struct drm_fb_helper *helper, int crtc_count,
 		       int max_conn);
@@ -128,4 +131,7 @@
 drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
 		      int width, int height);
 
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+				       struct drm_connector *connector);
 #endif
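With the funcs table now const and its assignment split out into
drm_fb_helper_prepare(), a driver's fbdev setup presumably becomes something
like this (sketch; the example_ callbacks are placeholders):

	static const struct drm_fb_helper_funcs example_fb_helper_funcs = {
		.gamma_set = example_gamma_set,
		.gamma_get = example_gamma_get,
		.fb_probe = example_fb_probe,
	};

	drm_fb_helper_prepare(dev, &fbdev->helper, &example_fb_helper_funcs);
	ret = drm_fb_helper_init(dev, &fbdev->helper, num_crtc, max_conn);

	/* MST hotplug can then grow and shrink the connector set at runtime */
	drm_fb_helper_add_one_connector(&fbdev->helper, connector);
	drm_fb_helper_remove_one_connector(&fbdev->helper, connector);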
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
new file mode 100644
index 0000000..2441f71
--- /dev/null
+++ b/include/drm/drm_of.h
@@ -0,0 +1,18 @@
+#ifndef __DRM_OF_H__
+#define __DRM_OF_H__
+
+struct drm_device;
+struct device_node;
+
+#ifdef CONFIG_OF
+extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+					   struct device_node *port);
+#else
+static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+						  struct device_node *port)
+{
+	return 0;
+}
+#endif
+
+#endif /* __DRM_OF_H__ */
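The !CONFIG_OF stub returning 0 lets drivers call this unconditionally. Typical
use would be filling an encoder's CRTC mask from the DT graph (sketch; the
fallback policy is the caller's choice):

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
	if (!encoder->possible_crtcs)
		encoder->possible_crtcs = 1;	/* no DT info: first CRTC only */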
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index d128629..26bb55e 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -163,5 +163,11 @@
 				 struct drm_rect *dst,
 				 int min_vscale, int max_vscale);
 void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
+void drm_rect_rotate(struct drm_rect *r,
+		     int width, int height,
+		     unsigned int rotation);
+void drm_rect_rotate_inv(struct drm_rect *r,
+			 int width, int height,
+			 unsigned int rotation);
 
 #endif
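The new helpers transform a rectangle into and out of a rotated coordinate
space, e.g. to map a plane's source rectangle into a rotated framebuffer. A
sketch of the round trip (DRM_ROTATE_180 and BIT() assumed from drm_crtc.h and
linux/bitops.h):

	struct drm_rect src = { .x1 = 0, .y1 = 0, .x2 = 100, .y2 = 50 };

	/* into the rotated space of a 200x100 source */
	drm_rect_rotate(&src, 200, 100, BIT(DRM_ROTATE_180));

	/* and back: rotate_inv undoes rotate for the same width/height/rotation */
	drm_rect_rotate_inv(&src, 200, 100, BIT(DRM_ROTATE_180));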
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index a5183da..202f0a7 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -182,6 +182,7 @@
 	 * @man: Pointer to a memory type manager.
 	 * @bo: Pointer to the buffer object we're allocating space for.
 	 * @placement: Placement details.
+	 * @flags: Additional placement flags.
 	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
 	 *
 	 * This function should allocate space in the memory type managed
@@ -206,6 +207,7 @@
 	int  (*get_node)(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 struct ttm_placement *placement,
+			 uint32_t flags,
 			 struct ttm_mem_reg *mem);
 
 	/**
@@ -653,18 +655,6 @@
 extern int ttm_tt_swapin(struct ttm_tt *ttm);
 
 /**
- * ttm_tt_cache_flush:
- *
- * @pages: An array of pointers to struct page:s to flush.
- * @num_pages: Number of pages to flush.
- *
- * Flush the data of the indicated pages from the cpu caches.
- * This is used when changing caching attributes of the pages from
- * cache-coherent.
- */
-extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
-
-/**
  * ttm_tt_set_placement_caching:
  *
  * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
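Memory type managers implementing get_node need to grow the extra parameter; a
sketch of the updated callback shape (the example_ name is hypothetical, and
the flags use shown is one plausible reading of the new parameter):

	static int example_get_node(struct ttm_mem_type_manager *man,
				    struct ttm_buffer_object *bo,
				    struct ttm_placement *placement,
				    uint32_t flags,
				    struct ttm_mem_reg *mem)
	{
		/* flags carries per-placement TTM_PL_FLAG_* bits so the
		 * allocator can honour requests such as top-down placement */
		mem->mm_node = NULL;
		return 0;	/* 0 with mem->mm_node == NULL means no space */
	}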
diff --git a/include/linux/component.h b/include/linux/component.h
index 6887018..c00dcc3 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -29,4 +29,11 @@
 int component_master_add_child(struct master *master,
 	int (*compare)(struct device *, void *), void *compare_data);
 
+struct component_match;
+
+int component_master_add_with_match(struct device *,
+	const struct component_master_ops *, struct component_match *);
+void component_match_add(struct device *, struct component_match **,
+	int (*compare)(struct device *, void *), void *compare_data);
+
 #endif
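The match API lets a master enumerate its components up front and have the core
bind everything once all of them have probed. A sketch for an OF-based master
(compare_of and example_master_ops are assumed helpers, not part of this patch):

	static int example_master_probe(struct platform_device *pdev)
	{
		struct component_match *match = NULL;
		struct device_node *np;

		for_each_available_child_of_node(pdev->dev.of_node, np)
			component_match_add(&pdev->dev, &match, compare_of, np);

		return component_master_add_with_match(&pdev->dev,
						       &example_master_ops,
						       match);
	}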
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 9abbeb9..b0b8556 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -780,7 +780,7 @@
 
 /**
  * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x99.
+ * The device specific ioctl range is from 0x40 to 0x9f.
  * Generic IOCTLS restart at 0xA0.
  *
  * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index def54f9..a0db2d4a 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -88,6 +88,11 @@
 #define DRM_MODE_SCALE_CENTER		2 /* Centered, no scaling */
 #define DRM_MODE_SCALE_ASPECT		3 /* Full screen, preserve aspect */
 
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE	0
+#define DRM_MODE_PICTURE_ASPECT_4_3	1
+#define DRM_MODE_PICTURE_ASPECT_16_9	2
+
 /* Dithering mode options */
 #define DRM_MODE_DITHERING_OFF	0
 #define DRM_MODE_DITHERING_ON	1
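These values back an "aspect ratio" connector property; a driver would
presumably expose them roughly like this (sketch using the generic property
API):

	static const struct drm_prop_enum_list aspect_ratio_names[] = {
		{ DRM_MODE_PICTURE_ASPECT_NONE, "None" },
		{ DRM_MODE_PICTURE_ASPECT_4_3,  "4:3"  },
		{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
	};

	prop = drm_property_create_enum(dev, 0, "aspect ratio",
					aspect_ratio_names,
					ARRAY_SIZE(aspect_ratio_names));
	drm_object_attach_property(&connector->base, prop,
				   DRM_MODE_PICTURE_ASPECT_NONE);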
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index b039320..eaad58b 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -19,6 +19,7 @@
 /* VIDCON0 */
 
 #define VIDCON0					0x00
+#define VIDCON0_DSI_EN				(1 << 30)
 #define VIDCON0_INTERLACE			(1 << 29)
 #define VIDCON0_VIDOUT_MASK			(0x7 << 26)
 #define VIDCON0_VIDOUT_SHIFT			26
@@ -355,7 +356,7 @@
 #define VIDINTCON0_INT_ENABLE			(1 << 0)
 
 #define VIDINTCON1				0x134
-#define VIDINTCON1_INT_I180			(1 << 2)
+#define VIDINTCON1_INT_I80			(1 << 2)
 #define VIDINTCON1_INT_FRAME			(1 << 1)
 #define VIDINTCON1_INT_FIFO			(1 << 0)
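The new VIDCON0_DSI_EN bit would presumably be OR'd into the control register
when the FIMD output feeds the DSI block (sketch; ctx->regs is an illustrative
ioremapped register base):

	u32 val = readl(ctx->regs + VIDCON0);

	val |= VIDCON0_DSI_EN;	/* route the video output through DSI */
	writel(val, ctx->regs + VIDCON0);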