Merge remote branch 'nouveau/for-airlied' of ../drm-nouveau-next into drm-testing

* 'nouveau/for-airlied' of ../drm-nouveau-next:
  drm/nv50: cast IGP memory location to u64 before shifting
  drm/nv50: use alternate source of SOR_MODE_CTRL for DP hack
  drm/nouveau: fix dual-link displays when plugged into single-link outputs
  drm/nv50: obey dcb->duallink_possible
  drm/nv50: fix duallink_possible calculation for DCB 4.0 cards
  drm/nouveau: don't execute INIT_GPIO unless we're really running the table
  drm/nv40: allow cold-booting of nv4x chipsets
  drm/nouveau: fix POST detection for certain chipsets
  drm/nouveau: Add getparam for current PTIMER time.
  drm/nouveau: allow cursor image and position to survive suspend
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 67ea3a6..70312da 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -384,7 +384,7 @@
 {
 	u32 httfea,baseaddr,enuscr;
 	struct pci_dev *dev1;
-	int i;
+	int i, ret;
 	unsigned size = amd64_fetch_size();
 
 	dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -400,15 +400,18 @@
 
 	if (i == ARRAY_SIZE(uli_sizes)) {
 		dev_info(&pdev->dev, "no ULi size found for %d\n", size);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	/* shadow x86-64 registers into ULi registers */
 	pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
-	if ((httfea & 0x7fff) >> (32 - 25))
-		return -ENODEV;
+	if ((httfea & 0x7fff) >> (32 - 25)) {
+		ret = -ENODEV;
+		goto put;
+	}
 
 	httfea = (httfea& 0x7fff) << 25;
 
@@ -420,9 +423,10 @@
 	enuscr= httfea+ (size * 1024 * 1024) - 1;
 	pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
 	pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
-
+	ret = 0;
+put:
 	pci_dev_put(dev1);
-	return 0;
+	return ret;
 }
 
 
@@ -441,7 +445,7 @@
 {
 	u32 tmp, apbase, apbar, aplimit;
 	struct pci_dev *dev1;
-	int i;
+	int i, ret;
 	unsigned size = amd64_fetch_size();
 
 	dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -458,7 +462,8 @@
 
 	if (i == ARRAY_SIZE(nforce3_sizes)) {
 		dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -472,7 +477,8 @@
 	/* if x86-64 aperture base is beyond 4G, exit here */
 	if ( (apbase & 0x7fff) >> (32 - 25) ) {
 		dev_info(&pdev->dev, "aperture base > 4G\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	apbase = (apbase & 0x7fff) << 25;
@@ -488,9 +494,11 @@
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
 
+	ret = 0;
+put:
 	pci_dev_put(dev1);
 
-	return 0;
+	return ret;
 }
 
 static int __devinit agp_amd64_probe(struct pci_dev *pdev,
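
The point of the two conversions above is that every early "return -ENODEV" used to leak the reference on dev1 taken when the companion device was looked up earlier in each function; routing all exits through a single "put" label guarantees pci_dev_put() runs exactly once. A minimal sketch of the pattern, with the lookup and hardware helpers being hypothetical stand-ins:

	static int setup_bridge(struct pci_dev *pdev)
	{
		struct pci_dev *dev1;
		int ret;

		dev1 = lookup_companion(pdev);	/* hypothetical; takes a ref */
		if (!dev1)
			return -ENODEV;		/* nothing to put yet */

		if (!hardware_ok(dev1)) {	/* hypothetical check */
			ret = -ENODEV;
			goto put;		/* error paths share cleanup */
		}

		program_registers(dev1);	/* hypothetical */
		ret = 0;
	put:
		pci_dev_put(dev1);		/* drop the ref exactly once */
		return ret;
	}
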
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2583ddf..88910e5 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -60,6 +60,7 @@
 	select FW_LOADER
         select DRM_KMS_HELPER
         select DRM_TTM
+	select POWER_SUPPLY
 	help
 	  Choose this option if you have an ATI Radeon graphics card.  There
 	  are both PCI and AGP versions.  You don't need to choose this to
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7644019..9b2a541 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -860,19 +860,24 @@
 	}
 }
 
-void drm_kms_helper_poll_init(struct drm_device *dev)
+void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
-	struct drm_connector *connector;
+	if (!dev->mode_config.poll_enabled)
+		return;
+	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
 	bool poll = false;
+	struct drm_connector *connector;
 	int ret;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->polled)
 			poll = true;
 	}
-	slow_work_register_user(THIS_MODULE);
-	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
-			       &output_poll_ops);
 
 	if (poll) {
 		ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
@@ -880,11 +885,22 @@
 			DRM_ERROR("delayed enqueue failed %d\n", ret);
 	}
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+	slow_work_register_user(THIS_MODULE);
+	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+			       &output_poll_ops);
+	dev->mode_config.poll_enabled = true;
+
+	drm_kms_helper_poll_enable(dev);
+}
 EXPORT_SYMBOL(drm_kms_helper_poll_init);
 
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
-	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+	drm_kms_helper_poll_disable(dev);
 	slow_work_unregister_user(THIS_MODULE);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
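
The split above exists so drivers can stop connector polling before a GPU is powered down and only restart it once the device is back; polling a powered-off card would touch dead hardware. The i915, nouveau and radeon switcheroo hunks below all follow the same bracket, sketched here with hypothetical suspend/resume callbacks:

	static void example_switcheroo_set_state(struct pci_dev *pdev,
						 enum vga_switcheroo_state state)
	{
		struct drm_device *dev = pci_get_drvdata(pdev);
		pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

		if (state == VGA_SWITCHEROO_ON) {
			example_resume(pdev);			/* hypothetical */
			drm_kms_helper_poll_enable(dev);	/* restart last */
		} else {
			drm_kms_helper_poll_disable(dev);	/* stop first */
			example_suspend(pdev, pmm);		/* hypothetical */
		}
	}
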
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index dfd4f36..c198186 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -147,7 +147,10 @@
 		csum += raw_edid[i];
 	if (csum) {
 		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
-		goto bad;
+
+		/* allow CEA to slide through, switches mangle this */
+		if (raw_edid[0] != 0x02)
+			goto bad;
 	}
 
 	/* per-block-type checks */
@@ -587,7 +590,7 @@
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1600x1200@75Hz */
-	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1600x1200@85Hz */
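
For reference, the checksum the first hunk tests is the byte sum of the entire 128-byte EDID block, which must be 0 modulo 256; the change lets a CEA extension block (tag 0x02 in byte 0) through even when a DP/HDMI switch has mangled its checksum. A sketch of the computation, using a local name for the standard block size:

	#define EDID_BLOCK_SIZE 128	/* standard EDID block length */

	/* Returns the checksum remainder; 0 means the block sums clean. */
	static u8 edid_block_csum(const u8 *raw_edid)
	{
		u8 csum = 0;
		int i;

		for (i = 0; i < EDID_BLOCK_SIZE; i++)
			csum += raw_edid[i];
		return csum;	/* u8 arithmetic is already mod 256 */
	}
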
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2a6b5de..cc6e56a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1399,12 +1399,14 @@
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
-		printk(KERN_INFO "i915: switched off\n");
+		printk(KERN_INFO "i915: switched on\n");
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
 		i915_resume(dev);
+		drm_kms_helper_poll_enable(dev);
 	} else {
 		printk(KERN_ERR "i915: switched off\n");
+		drm_kms_helper_poll_disable(dev);
 		i915_suspend(dev, pmm);
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index e13f6af..d4bcca8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -34,7 +34,7 @@
 static struct nouveau_dsm_priv {
 	bool dsm_detected;
 	acpi_handle dhandle;
-	acpi_handle dsm_handle;
+	acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
 static const char nouveau_dsm_muid[] = {
@@ -107,9 +107,9 @@
 static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
 {
 	if (id == VGA_SWITCHEROO_IGD)
-		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
+		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
 	else
-		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
+		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
 }
 
 static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
@@ -118,7 +118,7 @@
 	if (id == VGA_SWITCHEROO_IGD)
 		return 0;
 
-	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
+	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
 }
 
 static int nouveau_dsm_init(void)
@@ -151,18 +151,18 @@
 	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
 	if (!dhandle)
 		return false;
+
 	status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
 	if (ACPI_FAILURE(status)) {
 		return false;
 	}
 
-	ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
-			 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+	ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
+			  NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
 	if (ret < 0)
 		return false;
 
 	nouveau_dsm_priv.dhandle = dhandle;
-	nouveau_dsm_priv.dsm_handle = nvidia_handle;
 	return true;
 }
 
@@ -173,6 +173,7 @@
 	struct pci_dev *pdev = NULL;
 	int has_dsm = 0;
 	int vga_count = 0;
+
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
 		vga_count++;
 
@@ -180,7 +181,7 @@
 	}
 
 	if (vga_count == 2 && has_dsm) {
-		acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
+		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
 		printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
 		       acpi_method_name);
 		nouveau_dsm_priv.dsm_detected = true;
@@ -204,3 +205,57 @@
 {
 	vga_switcheroo_unregister_handler();
 }
+
+/* retrieve the ROM in 4k blocks */
+static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
+			    int offset, int len)
+{
+	acpi_status status;
+	union acpi_object rom_arg_elements[2], *obj;
+	struct acpi_object_list rom_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+	rom_arg.count = 2;
+	rom_arg.pointer = &rom_arg_elements[0];
+
+	rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	rom_arg_elements[0].integer.value = offset;
+
+	rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+	rom_arg_elements[1].integer.value = len;
+
+	status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_INFO "failed to evaluate ROM, got %s\n", acpi_format_exception(status));
+		return -ENODEV;
+	}
+	obj = (union acpi_object *)buffer.pointer;
+	memcpy(bios + offset, obj->buffer.pointer, len);
+	kfree(buffer.pointer);
+	return len;
+}
+
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+	acpi_status status;
+	acpi_handle dhandle, rom_handle;
+
+	if (!nouveau_dsm_priv.dsm_detected)
+		return false;
+
+	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	nouveau_dsm_priv.rom_handle = rom_handle;
+	return true;
+}
+
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+	return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
+}
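
The contract here is that nouveau_rom_call() copies one chunk into bios at the given offset and returns the number of bytes fetched, or a negative error if the _ROM evaluation fails, so a caller can walk the image in ROM_BIOS_PAGE-sized steps and stop at the first failure. A sketch of such a loop, mirroring the load_vbios_acpi() hunk added below:

	/* bios and bios_size are assumed to come from the caller */
	for (offset = 0; offset < bios_size; offset += ROM_BIOS_PAGE) {
		if (nouveau_acpi_get_bios_chunk(bios, offset,
						ROM_BIOS_PAGE) <= 0)
			break;	/* stop on the first short or failed read */
	}
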
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 9f30fb8..9ba2dea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -178,6 +178,25 @@
 	pci_disable_rom(dev->pdev);
 }
 
+static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
+{
+	int i;
+	int ret;
+	int size = 64 * 1024;
+
+	if (!nouveau_acpi_rom_supported(dev->pdev))
+		return;
+
+	for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
+		ret = nouveau_acpi_get_bios_chunk(data,
+						  (i * ROM_BIOS_PAGE),
+						  ROM_BIOS_PAGE);
+		if (ret <= 0)
+			break;
+	}
+	return;
+}
+
 struct methods {
 	const char desc[8];
 	void (*loadbios)(struct drm_device *, uint8_t *);
@@ -191,6 +210,7 @@
 };
 
 static struct methods nv50_methods[] = {
+	{ "ACPI", load_vbios_acpi, true },
 	{ "PRAMIN", load_vbios_pramin, true },
 	{ "PROM", load_vbios_prom, false },
 	{ "PCIROM", load_vbios_pci, true },
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 256e82b..149ed22 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -241,7 +241,8 @@
 	if (nv_encoder && nv_connector->native_mode) {
 		unsigned status = connector_status_connected;
 
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI_BUTTON) || \
+	(defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
 		if (!nouveau_ignorelid && !acpi_lid_open())
 			status = connector_status_unknown;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5b13443..c697191 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -851,12 +851,17 @@
 extern int  nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
 
 /* nouveau_acpi.c */
+#define ROM_BIOS_PAGE 4096
 #if defined(CONFIG_ACPI)
 void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
 #else
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
+static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
 #endif
 
 /* nouveau_backlight.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a2544ff..147e59c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,12 +376,15 @@
 static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
 					 enum vga_switcheroo_state state)
 {
+	struct drm_device *dev = pci_get_drvdata(pdev);
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
 		nouveau_pci_resume(pdev);
+		drm_kms_helper_poll_enable(dev);
 	} else {
 		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+		drm_kms_helper_poll_disable(dev);
 		nouveau_pci_suspend(pdev, pmm);
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96..704a25d 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@
 
 	if (!dev_priv->engine.graph.ctxprog) {
 		struct nouveau_grctx ctx = {};
-		uint32_t cp[256];
+		uint32_t *cp;
+
+		cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+		if (!cp)
+			return -ENOMEM;
 
 		ctx.dev = dev;
 		ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@
 		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
 		for (i = 0; i < ctx.ctxprog_len; i++)
 			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+
+		kfree(cp);
 	}
 
 	/* No context present currently */
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 80c5b3e..1c02d23 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,7 +1,6 @@
 config DRM_RADEON_KMS
 	bool "Enable modesetting on radeon by default - NEW DRIVER"
 	depends on DRM_RADEON
-	depends on POWER_SUPPLY
 	help
 	  Choose this option if you want kernel modesetting enabled by default.
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 03dd6c4..f3f2827 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -707,6 +707,7 @@
 		break;
 	case ATOM_DCPLL:
 	case ATOM_PPLL_INVALID:
+	default:
 		pll = &rdev->clock.dcpll;
 		break;
 	}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7ffc389..44e96a2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -430,7 +430,7 @@
 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 2;
+				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
 			} else {
 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5c9ce2b..669feb6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -261,6 +261,7 @@
 	unsigned		rdomain;
 	unsigned		wdomain;
 	u32			tiling_flags;
+	bool			reserved;
 };
 
 /*
@@ -575,6 +576,7 @@
  */
 int radeon_agp_init(struct radeon_device *rdev);
 void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
 void radeon_agp_fini(struct radeon_device *rdev);
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f..f40dfb7 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@
 	}
 #endif
 }
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+	radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6e733fd..24ea683 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -680,11 +680,19 @@
 	uint8_t dac;
 	union atom_supported_devices *supported_devices;
 	int i, j, max_device;
-	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+	struct bios_connector *bios_connectors;
+	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 
-	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+	if (!bios_connectors)
 		return false;
 
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+				    &data_offset)) {
+		kfree(bios_connectors);
+		return false;
+	}
+
 	supported_devices =
 	    (union atom_supported_devices *)(ctx->bios + data_offset);
 
@@ -851,6 +859,7 @@
 
 	radeon_link_encoder_connector(dev);
 
+	kfree(bios_connectors);
 	return true;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a20b612..db33852 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -546,8 +546,10 @@
 		/* don't suspend or resume card normally */
 		rdev->powered_down = false;
 		radeon_resume_kms(dev);
+		drm_kms_helper_poll_enable(dev);
 	} else {
 		printk(KERN_INFO "radeon: switched off\n");
+		drm_kms_helper_poll_disable(dev);
 		radeon_suspend_kms(dev, pmm);
 		/* don't suspend or resume card normally */
 		rdev->powered_down = true;
@@ -754,6 +756,8 @@
 	/* evict remaining vram memory */
 	radeon_bo_evict_vram(rdev);
 
+	radeon_agp_suspend(rdev);
+
 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index da85cad..1006549 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -983,8 +983,11 @@
 		/* set display priority to high for r3xx, rv515 chips
 		 * this avoids flickering due to underflow to the
 		 * display controllers during heavy acceleration.
+		 * Don't force high on rs4xx igp chips as it seems to
+		 * affect the sound card.  See kernel bug 15982.
 		 */
-		if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+		    !(rdev->flags & RADEON_IS_IGP))
 			rdev->disp_priority = 2;
 		else
 			rdev->disp_priority = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a8d18bc..d5b9373 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -301,6 +301,7 @@
 		r = radeon_bo_reserve(lobj->bo, false);
 		if (unlikely(r != 0))
 			return r;
+		lobj->reserved = true;
 	}
 	return 0;
 }
@@ -311,7 +312,7 @@
 
 	list_for_each_entry(lobj, head, list) {
 		/* only unreserve object we successfully reserved */
-		if (radeon_bo_is_reserved(lobj->bo))
+		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
 			radeon_bo_unreserve(lobj->bo);
 	}
 }
@@ -322,6 +323,9 @@
 	struct radeon_bo *bo;
 	int r;
 
+	list_for_each_entry(lobj, head, list) {
+		lobj->reserved = false;
+	}
 	r = radeon_bo_list_reserve(head);
 	if (unlikely(r != 0)) {
 		return r;
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 40ab6d9..11ce94c 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -900,9 +900,10 @@
 			flags |= RADEON_FRONT;
 	}
 	if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
-		if (!dev_priv->have_z_offset)
+		if (!dev_priv->have_z_offset) {
 			printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
-		flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+			flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+		}
 	}
 
 	if (flags & (RADEON_FRONT | RADEON_BACK)) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3aa3a65..e9918d8 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -451,7 +451,7 @@
 			/* RADEON_IS_AGP is set only if AGP is active */
 			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
 			mem->bus.base = rdev->mc.agp_base;
-			mem->bus.is_iomem = true;
+			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
 		}
 #endif
 		break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0d9a42c..ef91069 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@
 /**
  * Limits for the pool. They are handled without locks because only place where
  * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialiazation to access them is pointless.
+ * so forcing serialization to access them is pointless.
  */
 
 struct ttm_pool_opts {
@@ -165,16 +165,18 @@
 		m->options.small = val;
 	else if (attr == &ttm_page_pool_alloc_size) {
 		if (val > NUM_PAGES_TO_ALLOC*8) {
-			printk(KERN_ERR "[ttm] Setting allocation size to %lu "
-					"is not allowed. Recomended size is "
-					"%lu\n",
-					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_ERR TTM_PFX
+			       "Setting allocation size to %lu "
+			       "is not allowed. Recommended size is "
+			       "%lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 			return size;
 		} else if (val > NUM_PAGES_TO_ALLOC) {
-			printk(KERN_WARNING "[ttm] Setting allocation size to "
-					"larger than %lu is not recomended.\n",
-					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_WARNING TTM_PFX
+			       "Setting allocation size to "
+			       "larger than %lu is not recommended.\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 		}
 		m->options.alloc_size = val;
 	}
@@ -277,7 +279,7 @@
 {
 	unsigned i;
 	if (set_pages_array_wb(pages, npages))
-		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
 				npages);
 	for (i = 0; i < npages; ++i)
 		__free_page(pages[i]);
@@ -313,7 +315,8 @@
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+		printk(KERN_ERR TTM_PFX
+		       "Failed to allocate memory for pool free operation.\n");
 		return 0;
 	}
 
@@ -390,7 +393,7 @@
 }
 
 /**
- * Calback for mm to request pool to reduce number of page held.
+ * Callback for mm to request the pool to reduce the number of pages held.
  */
 static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
 {
@@ -433,14 +436,16 @@
 	case tt_uncached:
 		r = set_pages_array_uc(pages, cpages);
 		if (r)
-			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
-					cpages);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to set %d pages to uc!\n",
+			       cpages);
 		break;
 	case tt_wc:
 		r = set_pages_array_wc(pages, cpages);
 		if (r)
-			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
-					cpages);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to set %d pages to wc!\n",
+			       cpages);
 		break;
 	default:
 		break;
@@ -458,7 +463,7 @@
 		struct page **failed_pages, unsigned cpages)
 {
 	unsigned i;
-	/* Failed pages has to be reed */
+	/* Failed pages have to be freed */
 	for (i = 0; i < cpages; ++i) {
 		list_del(&failed_pages[i]->lru);
 		__free_page(failed_pages[i]);
@@ -485,7 +490,8 @@
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+		printk(KERN_ERR TTM_PFX
+		       "Unable to allocate table for new pages.");
 		return -ENOMEM;
 	}
 
@@ -493,12 +499,13 @@
 		p = alloc_page(gfp_flags);
 
 		if (!p) {
-			printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
 			if (cpages) {
-				r = ttm_set_pages_caching(caching_array, cstate, cpages);
+				r = ttm_set_pages_caching(caching_array,
+							  cstate, cpages);
 				if (r)
 					ttm_handle_caching_state_failure(pages,
 						ttm_flags, cstate,
@@ -590,7 +597,8 @@
 			++pool->nrefills;
 			pool->npages += alloc_size;
 		} else {
-			printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to fill pool (%p).", pool);
 			/* If we have any pages left put them to the pool. */
 			list_for_each_entry(p, &pool->list, lru) {
 				++cpages;
@@ -671,13 +679,14 @@
 		if (flags & TTM_PAGE_FLAG_DMA32)
 			gfp_flags |= GFP_DMA32;
 		else
-			gfp_flags |= __GFP_HIGHMEM;
+			gfp_flags |= GFP_HIGHUSER;
 
 		for (r = 0; r < count; ++r) {
 			p = alloc_page(gfp_flags);
 			if (!p) {
 
-				printk(KERN_ERR "[ttm] unable to allocate page.");
+				printk(KERN_ERR TTM_PFX
+				       "Unable to allocate page.");
 				return -ENOMEM;
 			}
 
@@ -709,8 +718,9 @@
 		if (r) {
 			/* If there is any pages in the list put them back to
 			 * the pool. */
-			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
-					"for large request.");
+			printk(KERN_ERR TTM_PFX
+			       "Failed to allocate extra pages "
+			       "for large request.");
 			ttm_put_pages(pages, 0, flags, cstate);
 			return r;
 		}
@@ -778,7 +788,7 @@
 	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
 		return 0;
 
-	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
 
 	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
 
@@ -813,7 +823,7 @@
 	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
 		return;
 
-	printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
 	ttm_pool_mm_shrink_fini(&_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
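
The string changes above replace the ad-hoc "[ttm]" tags with the driver's TTM_PFX prefix macro so every pool-allocator message shares one spelling; since adjacent string literals concatenate at compile time, the prefix simply folds into the format string. A minimal illustration (the prefix value shown is an assumption about what ttm_module.h defines):

	#define TTM_PFX "[TTM] "	/* assumed definition */

	printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", npages);
	/* compiles as one literal: "<err-level>[TTM] Failed to set ..." */
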
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 1a3cb68..4505e17 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o
+	    vmwgfx_overlay.o vmwgfx_fence.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c9c081..7597323 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -318,6 +318,15 @@
 		goto out_err3;
 	}
 
+	/* Need mmio memory to check for fifo pitchlock cap. */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+	    !vmw_fifo_have_pitchlock(dev_priv)) {
+		ret = -ENOSYS;
+		DRM_ERROR("Hardware has no pitchlock\n");
+		goto out_err4;
+	}
+
 	dev_priv->tdev = ttm_object_device_init
 	    (dev_priv->mem_global_ref.object, 12);
 
@@ -399,8 +408,6 @@
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
-
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
 	vmw_fb_close(dev_priv);
@@ -546,7 +553,6 @@
 {
 	struct vmw_master *vmaster;
 
-	DRM_INFO("Master create.\n");
 	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
 	if (unlikely(vmaster == NULL))
 		return -ENOMEM;
@@ -563,7 +569,6 @@
 {
 	struct vmw_master *vmaster = vmw_master(master);
 
-	DRM_INFO("Master destroy.\n");
 	master->driver_priv = NULL;
 	kfree(vmaster);
 }
@@ -579,8 +584,6 @@
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret = 0;
 
-	DRM_INFO("Master set.\n");
-
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
 		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +625,6 @@
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	DRM_INFO("Master drop.\n");
-
 	/**
 	 * Make sure the master doesn't disappear while we have
 	 * it locked.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 356dc93..1341adef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,7 +41,7 @@
 
 #define VMWGFX_DRIVER_DATE "20100209"
 #define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 1
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -102,6 +102,13 @@
 	struct vmw_cursor_snooper snooper;
 };
 
+struct vmw_fence_queue {
+	struct list_head head;
+	struct timespec lag;
+	struct timespec lag_time;
+	spinlock_t lock;
+};
+
 struct vmw_fifo_state {
 	unsigned long reserved_size;
 	__le32 *dynamic_buffer;
@@ -115,6 +122,7 @@
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
+	struct vmw_fence_queue fence_queue;
 };
 
 struct vmw_relocation {
@@ -179,6 +187,7 @@
 	uint32_t vga_red_mask;
 	uint32_t vga_blue_mask;
 	uint32_t vga_green_mask;
+	uint32_t vga_pitchlock;
 
 	/*
 	 * Framebuffer info.
@@ -393,6 +402,7 @@
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +451,23 @@
 			     uint32_t sequence,
 			     bool interruptible,
 			     unsigned long timeout);
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
+				struct vmw_fifo_state *fifo_state);
+
+
+/**
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
+ */
+
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+			  uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+			  uint32_t signaled_sequence);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+			struct vmw_fence_queue *queue, uint32_t us);
 
 /**
  * Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +493,9 @@
 			  struct ttm_object_file *tfile,
 			  struct ttm_buffer_object *bo,
 			  SVGA3dCmdHeader *header);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+			unsigned width, unsigned height, unsigned pitch,
+			unsigned bbp, unsigned depth);
 
 /**
  * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dbd36b8..bdd67cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -669,6 +669,15 @@
 		goto out_err;
 
 	vmw_apply_relocations(sw_context);
+
+	if (arg->throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+				   arg->throttle_us);
+
+		if (unlikely(ret != 0))
+			goto out_err;
+	}
+
 	vmw_fifo_commit(dev_priv, arg->command_size);
 
 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 7421aaa..181f472 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -132,16 +132,14 @@
 		return -EINVAL;
 	}
 
-	/* without multimon its hard to resize */
-	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
-	    (var->xres != par->max_width ||
-	     var->yres != par->max_height)) {
-		DRM_ERROR("Tried to resize, but we don't have multimon\n");
+	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    (var->xoffset != 0 || var->yoffset != 0)) {
+		DRM_ERROR("Cannot handle panning without display topology\n");
 		return -EINVAL;
 	}
 
-	if (var->xres > par->max_width ||
-	    var->yres > par->max_height) {
+	if ((var->xoffset + var->xres) > par->max_width ||
+	    (var->yoffset + var->yres) > par->max_height) {
 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
 		return -EINVAL;
 	}
@@ -154,8 +152,7 @@
 	struct vmw_fb_par *par = info->par;
 	struct vmw_private *vmw_priv = par->vmw_priv;
 
-	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
@@ -164,18 +161,11 @@
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
 
-		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
-		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
-		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
-		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
-		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
-		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+		vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+				   info->fix.line_length,
+				   par->bpp, par->depth);
 
 		/* TODO check if pitch and offset changes */
-
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
@@ -183,13 +173,19 @@
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
 	} else {
-		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
-		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
+		vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+				   info->fix.line_length,
+				   par->bpp, par->depth);
 
-		/* TODO check if pitch and offset changes */
 	}
 
+	/* Warn loudly here: if the framebuffer offset is non-zero, the
+	 * user probably cannot see anything on the screen.
+	 */
+	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
+
 	return 0;
 }
 
@@ -416,48 +412,23 @@
 	unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
 	int ret;
 
+	/* XXX These shouldn't be hardcoded. */
 	initial_width = 800;
 	initial_height = 600;
 
 	fb_bbp = 32;
 	fb_depth = 24;
 
-	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
-		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
-	} else {
-		fb_width = min(vmw_priv->fb_max_width, initial_width);
-		fb_height = min(vmw_priv->fb_max_height, initial_height);
-	}
+	/* XXX Neither should these be hardcoded. */
+	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
 	initial_width = min(fb_width, initial_width);
 	initial_height = min(fb_height, initial_height);
 
-	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
-	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
-	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
-	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
-	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
-	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+	fb_pitch = fb_width * fb_bbp / 8;
+	fb_size = fb_pitch * fb_height;
 	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
-	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
-
-	DRM_DEBUG("width  %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
-	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
-	DRM_DEBUG("width  %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
-	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
-	DRM_DEBUG("bpp    %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
-	DRM_DEBUG("depth  %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
-	DRM_DEBUG("bpl    %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
-	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
-	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
-	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
-	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
-	DRM_DEBUG("fb_pitch  %u\n", fb_pitch);
-	DRM_DEBUG("fb_size   %u kiB\n", fb_size / 1024);
 
 	info = framebuffer_alloc(sizeof(*par), device);
 	if (!info)
@@ -659,6 +630,10 @@
 		goto err_unlock;
 
 	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+
+	/* Could probably BUG_ON() here rather than just warn. */
+	WARN_ON(bo->offset != 0);
+
 	ttm_bo_unreserve(bo);
 err_unlock:
 	ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 0000000..61eacc1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_fence {
+	struct list_head head;
+	uint32_t sequence;
+	struct timespec submitted;
+};
+
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+{
+	INIT_LIST_HEAD(&queue->head);
+	queue->lag = ns_to_timespec(0);
+	getrawmonotonic(&queue->lag_time);
+	spin_lock_init(&queue->lock);
+}
+
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+{
+	struct vmw_fence *fence, *next;
+
+	spin_lock(&queue->lock);
+	list_for_each_entry_safe(fence, next, &queue->head, head) {
+		kfree(fence);
+	}
+	spin_unlock(&queue->lock);
+}
+
+int vmw_fence_push(struct vmw_fence_queue *queue,
+		   uint32_t sequence)
+{
+	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+	if (unlikely(!fence))
+		return -ENOMEM;
+
+	fence->sequence = sequence;
+	getrawmonotonic(&fence->submitted);
+	spin_lock(&queue->lock);
+	list_add_tail(&fence->head, &queue->head);
+	spin_unlock(&queue->lock);
+
+	return 0;
+}
+
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+		   uint32_t signaled_sequence)
+{
+	struct vmw_fence *fence, *next;
+	struct timespec now;
+	bool updated = false;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+
+	if (list_empty(&queue->head)) {
+		queue->lag = ns_to_timespec(0);
+		queue->lag_time = now;
+		updated = true;
+		goto out_unlock;
+	}
+
+	list_for_each_entry_safe(fence, next, &queue->head, head) {
+		if (signaled_sequence - fence->sequence > (1 << 30))
+			continue;
+
+		queue->lag = timespec_sub(now, fence->submitted);
+		queue->lag_time = now;
+		updated = true;
+		list_del(&fence->head);
+		kfree(fence);
+	}
+
+out_unlock:
+	spin_unlock(&queue->lock);
+
+	return (updated) ? 0 : -EBUSY;
+}
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+					struct timespec t2)
+{
+	t1.tv_sec += t2.tv_sec;
+	t1.tv_nsec += t2.tv_nsec;
+	if (t1.tv_nsec >= 1000000000L) {
+		t1.tv_sec += 1;
+		t1.tv_nsec -= 1000000000L;
+	}
+
+	return t1;
+}
+
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+{
+	struct timespec now;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+	queue->lag = vmw_timespec_add(queue->lag,
+				      timespec_sub(now, queue->lag_time));
+	queue->lag_time = now;
+	spin_unlock(&queue->lock);
+	return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+		       uint32_t us)
+{
+	struct timespec lag, cond;
+
+	cond = ns_to_timespec((s64) us * 1000);
+	lag = vmw_fifo_lag(queue);
+	return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+		 struct vmw_fence_queue *queue, uint32_t us)
+{
+	struct vmw_fence *fence;
+	uint32_t sequence;
+	int ret;
+
+	while (!vmw_lag_lt(queue, us)) {
+		spin_lock(&queue->lock);
+		if (list_empty(&queue->head))
+			sequence = atomic_read(&dev_priv->fence_seq);
+		else {
+			fence = list_first_entry(&queue->head,
+						 struct vmw_fence, head);
+			sequence = fence->sequence;
+		}
+		spin_unlock(&queue->lock);
+
+		ret = vmw_wait_fence(dev_priv, false, sequence, true,
+				     3*HZ);
+
+		if (unlikely(ret != 0))
+			return ret;
+
+		(void) vmw_fence_pull(queue, sequence);
+	}
+	return 0;
+}
+
+
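
To spell out the throttling model of this new file: queue->lag accumulates raw-monotonic time elapsed since the last accounting point, vmw_fence_pull() resets it to the age of the most recently signaled fence (or to zero when the queue is empty), and vmw_wait_lag() keeps waiting on the oldest outstanding fence until the lag drops under the caller's budget. A worked example of the call an execbuf makes, with assumed timings:

	/* Assume fence 42 was pushed at t = 0 and the caller asks for a
	 * 2000 us budget at t = 5 ms:
	 *   - vmw_fifo_lag() reports ~5 ms >= 2 ms, so wait on fence 42;
	 *   - when 42 signals, vmw_fence_pull() sets lag = now - submitted;
	 *   - the loop exits once the accumulated lag is below 2 ms.
	 */
	ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue, 2000);
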
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39d43a0..e6a1eb7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -34,6 +34,9 @@
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
 	fifo_min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
 		return false;
@@ -48,6 +51,21 @@
 	return true;
 }
 
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t caps;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
+	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+		return true;
+
+	return false;
+}
+
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@
 
 	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
 	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-
+	vmw_fence_queue_init(&fifo->fence_queue);
 	return vmw_fifo_send_fence(dev_priv, &dummy);
 out_err:
 	vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@
 		  dev_priv->enable_state);
 
 	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_fence_queue_takedown(&fifo->fence_queue);
 
 	if (likely(fifo->last_buffer != NULL)) {
 		vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@
 	fifo_state->last_buffer_add = true;
 	vmw_fifo_commit(dev_priv, bytes);
 	fifo_state->last_buffer_add = false;
+	(void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+	vmw_update_sequence(dev_priv, fifo_state);
 
 out_err:
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4d7cb53..e92298a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -64,22 +64,33 @@
 	return (busy == 0);
 }
 
+void vmw_update_sequence(struct vmw_private *dev_priv,
+			 struct vmw_fifo_state *fifo_state)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+	uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+	if (dev_priv->last_read_sequence != sequence) {
+		dev_priv->last_read_sequence = sequence;
+		vmw_fence_pull(&fifo_state->fence_queue, sequence);
+	}
+}
 
 bool vmw_fence_signaled(struct vmw_private *dev_priv,
 			uint32_t sequence)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	struct vmw_fifo_state *fifo_state;
 	bool ret;
 
 	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
 		return true;
 
-	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	fifo_state = &dev_priv->fifo;
+	vmw_update_sequence(dev_priv, fifo_state);
 	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
 		return true;
 
-	fifo_state = &dev_priv->fifo;
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
 	    vmw_fifo_idle(dev_priv, sequence))
 		return true;
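
Both the VMW_FENCE_WRAP test in this function and the "(1 << 30)" test in vmwgfx_fence.c lean on unsigned 32-bit subtraction wrapping: the difference "last_read - seq" stays small for any fence at or behind the read pointer, even after the counter rolls over, as long as outstanding fences span less than the window. A small illustration with an assumed window constant:

	#define SEQ_WINDOW (1u << 30)	/* assumed, cf. VMW_FENCE_WRAP */

	/* True if seq is at or behind last_read, modulo 2^32. */
	static bool seq_signaled(uint32_t last_read, uint32_t seq)
	{
		/* e.g. last_read = 2, seq = 0xfffffffe:
		 * 2 - 0xfffffffe == 4 (mod 2^32), inside the window,
		 * so the fence counts as signaled despite the rollover. */
		return last_read - seq < SEQ_WINDOW;
	}
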
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbc7c4c..b78dcf0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,8 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
 
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
@@ -326,6 +328,7 @@
 struct vmw_framebuffer_surface {
 	struct vmw_framebuffer base;
 	struct vmw_surface *surface;
+	struct vmw_dma_buffer *buffer;
 	struct delayed_work d_work;
 	struct mutex work_lock;
 	bool present_fs;
@@ -500,8 +503,8 @@
 	vfbs->base.base.depth = 24;
 	vfbs->base.base.width = width;
 	vfbs->base.base.height = height;
-	vfbs->base.pin = NULL;
-	vfbs->base.unpin = NULL;
+	vfbs->base.pin = &vmw_surface_dmabuf_pin;
+	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 	vfbs->surface = surface;
 	mutex_init(&vfbs->work_lock);
 	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@
 	.create_handle = vmw_framebuffer_create_handle,
 };
 
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(&vfb->base);
+	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+	int ret;
+
+	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+	if (unlikely(vfbs->buffer == NULL))
+		return -ENOMEM;
+
+	vmw_overlay_pause_all(dev_priv);
+	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+			       &vmw_vram_ne_placement,
+			       false, &vmw_dmabuf_bo_free);
+	vmw_overlay_resume_all(dev_priv);
+
+	return ret;
+}
+
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+	struct ttm_buffer_object *bo;
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(&vfb->base);
+
+	bo = &vfbs->buffer->base;
+	ttm_bo_unref(&bo);
+	vfbs->buffer = NULL;
+
+	return 0;
+}
+
 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@
 		vmw_framebuffer_to_vfbd(&vfb->base);
 	int ret;
 
+
 	vmw_overlay_pause_all(dev_priv);
 
 	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
 
-	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
-		vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
-		vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
-		vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
-		vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
-		vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
-		vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-		vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-		vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-	} else
-		WARN_ON(true);
-
 	vmw_overlay_resume_all(dev_priv);
 
+	WARN_ON(ret != 0);
+
 	return 0;
 }
 
@@ -668,7 +687,7 @@
 
 	/* XXX get the first 3 from the surface info */
 	vfbd->base.base.bits_per_pixel = 32;
-	vfbd->base.base.pitch = width * 32 / 4;
+	vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
 	vfbd->base.base.depth = 24;
 	vfbd->base.base.width = width;
 	vfbd->base.base.height = height;
@@ -765,8 +784,9 @@
 	dev->mode_config.funcs = &vmw_kms_funcs;
 	dev->mode_config.min_width = 1;
 	dev->mode_config.min_height = 1;
-	dev->mode_config.max_width = dev_priv->fb_max_width;
-	dev->mode_config.max_height = dev_priv->fb_max_height;
+	/* assumed largest fb size */
+	dev->mode_config.max_width = 8192;
+	dev->mode_config.max_height = 8192;
 
 	ret = vmw_kms_init_legacy_display_system(dev_priv);
 
@@ -826,24 +846,25 @@
 	return ret;
 }
 
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+			unsigned width, unsigned height, unsigned pitch,
+			unsigned bbp, unsigned depth)
+{
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
+	vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
+	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+}
+
 int vmw_kms_save_vga(struct vmw_private *vmw_priv)
 {
-	/*
-	 * setup a single multimon monitor with the size
-	 * of 0x0, this stops the UI from resizing when we
-	 * change the framebuffer size
-	 */
-	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-	}
-
 	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
 	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
 	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
@@ -852,6 +873,12 @@
 	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
 	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
 	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_priv->vga_pitchlock =
+			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		vmw_priv->vga_pitchlock =
+			ioread32(vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
 	return 0;
 }
@@ -866,9 +893,12 @@
 	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
 	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
 	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
-
-	/* TODO check for multimon */
-	vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+			  vmw_priv->vga_pitchlock);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		iowrite32(vmw_priv->vga_pitchlock,
+			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 9089159..f7094dd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -38,6 +38,7 @@
 	struct list_head active;
 
 	unsigned num_active;
+	unsigned last_num_active;
 
 	struct vmw_framebuffer *fb;
 };
@@ -49,8 +50,6 @@
 	struct vmw_display_unit base;
 
 	struct list_head active;
-
-	unsigned unit;
 };
 
 static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +87,44 @@
 {
 	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
 	struct vmw_legacy_display_unit *entry;
-	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_crtc *crtc = NULL;
 	int i = 0;
 
-	/* to stop the screen from changing size on resize */
-	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
-	for (i = 0; i < lds->num_active; i++) {
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+	/* If there is no display topology the host just assumes
+	 * that the guest will set the same layout as the host.
+	 */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+		int w = 0, h = 0;
+		list_for_each_entry(entry, &lds->active, active) {
+			crtc = &entry->base.crtc;
+			w = max(w, crtc->x + crtc->mode.hdisplay);
+			h = max(h, crtc->y + crtc->mode.vdisplay);
+			i++;
+		}
+
+		if (crtc == NULL)
+			return 0;
+		fb = entry->base.crtc.fb;
+
+		vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+				   fb->bits_per_pixel, fb->depth);
+
+		return 0;
 	}
 
-	/* Now set the mode */
-	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+	if (!list_empty(&lds->active)) {
+		entry = list_entry(lds->active.next, typeof(*entry), active);
+		fb = entry->base.crtc.fb;
+
+		vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+				   fb->bits_per_pixel, fb->depth);
+	}
+
+	/* Make sure we always show something. */
+	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+		  lds->num_active ? lds->num_active : 1);
+
 	i = 0;
 	list_for_each_entry(entry, &lds->active, active) {
 		crtc = &entry->base.crtc;
@@ -120,6 +140,10 @@
 		i++;
 	}
 
+	BUG_ON(i != lds->num_active);
+
+	lds->last_num_active = lds->num_active;
+
 	return 0;
 }
 
@@ -130,6 +154,7 @@
 	if (list_empty(&ldu->active))
 		return 0;
 
+	/* Must use list_del_init() here, otherwise list_empty(&ldu->active) will not work. */
 	list_del_init(&ldu->active);
 	if (--(ld->num_active) == 0) {
 		BUG_ON(!ld->fb);
@@ -149,24 +174,29 @@
 	struct vmw_legacy_display_unit *entry;
 	struct list_head *at;
 
+	BUG_ON(!ld->num_active && ld->fb);
+	if (vfb != ld->fb) {
+		if (ld->fb && ld->fb->unpin)
+			ld->fb->unpin(ld->fb);
+		if (vfb->pin)
+			vfb->pin(vfb);
+		ld->fb = vfb;
+	}
+
 	if (!list_empty(&ldu->active))
 		return 0;
 
 	at = &ld->active;
 	list_for_each_entry(entry, &ld->active, active) {
-		if (entry->unit > ldu->unit)
+		if (entry->base.unit > ldu->base.unit)
 			break;
 
 		at = &entry->active;
 	}
 
 	list_add(&ldu->active, at);
-	if (ld->num_active++ == 0) {
-		BUG_ON(ld->fb);
-		if (vfb->pin)
-			vfb->pin(vfb);
-		ld->fb = vfb;
-	}
+
+	ld->num_active++;
 
 	return 0;
 }
@@ -208,6 +238,8 @@
 
 	/* ldu only supports one fb active at the time */
 	if (dev_priv->ldu_priv->fb && vfb &&
+	    !(dev_priv->ldu_priv->num_active == 1 &&
+	      !list_empty(&ldu->active)) &&
 	    dev_priv->ldu_priv->fb != vfb) {
 		DRM_ERROR("Multiple framebuffers not supported\n");
 		return -EINVAL;
@@ -443,18 +475,16 @@
 	if (!ldu)
 		return -ENOMEM;
 
-	ldu->unit = unit;
+	ldu->base.unit = unit;
 	crtc = &ldu->base.crtc;
 	encoder = &ldu->base.encoder;
 	connector = &ldu->base.connector;
 
+	INIT_LIST_HEAD(&ldu->active);
+
 	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
-	/* Initial status */
-	if (unit == 0)
-		connector->status = connector_status_connected;
-	else
-		connector->status = connector_status_disconnected;
+	connector->status = vmw_ldu_connector_detect(connector);
 
 	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
 			 DRM_MODE_ENCODER_LVDS);
@@ -462,8 +492,6 @@
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
 
-	INIT_LIST_HEAD(&ldu->active);
-
 	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
 
 	drm_connector_attach_property(connector,
@@ -487,18 +515,22 @@
 
 	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
 	dev_priv->ldu_priv->num_active = 0;
+	dev_priv->ldu_priv->last_num_active = 0;
 	dev_priv->ldu_priv->fb = NULL;
 
 	drm_mode_create_dirty_info_property(dev_priv->dev);
 
 	vmw_ldu_init(dev_priv, 0);
-	vmw_ldu_init(dev_priv, 1);
-	vmw_ldu_init(dev_priv, 2);
-	vmw_ldu_init(dev_priv, 3);
-	vmw_ldu_init(dev_priv, 4);
-	vmw_ldu_init(dev_priv, 5);
-	vmw_ldu_init(dev_priv, 6);
-	vmw_ldu_init(dev_priv, 7);
+	/* for old hardware without multimon only enable one display */
+	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+		vmw_ldu_init(dev_priv, 1);
+		vmw_ldu_init(dev_priv, 2);
+		vmw_ldu_init(dev_priv, 3);
+		vmw_ldu_init(dev_priv, 4);
+		vmw_ldu_init(dev_priv, 5);
+		vmw_ldu_init(dev_priv, 6);
+		vmw_ldu_init(dev_priv, 7);
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index ad566c8..df2036e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -358,6 +358,8 @@
 	if (stream->buf != buf)
 		stream->buf = vmw_dmabuf_reference(buf);
 	stream->saved = *arg;
+	/* stream is no longer stopped/paused */
+	stream->paused = false;
 
 	return 0;
 }
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index dc5873c..1121f77 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -130,4 +130,7 @@
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
 extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+
+extern void drm_kms_helper_poll_disable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable(struct drm_device *dev);
 #endif