Merge tag 'for-kvmgt' of git://git.kernel.org/pub/scm/virt/kvm/kvm into drm-intel-next-queued

Paolo Bonzini writes:

The three KVM patches that KVMGT needs.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
diff --git a/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
new file mode 100644
index 0000000..9409d9c
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
@@ -0,0 +1,33 @@
+Silicon Image SiI8620 HDMI/MHL bridge bindings
+
+Required properties:
+	- compatible: "sil,sii8620"
+	- reg: i2c address of the bridge
+	- cvcc10-supply: Digital Core Supply Voltage (1.0V)
+	- iovcc18-supply: I/O Supply Voltage (1.8V)
+	- interrupts, interrupt-parent: interrupt specifier of INT pin
+	- reset-gpios: gpio specifier of RESET pin
+	- clocks, clock-names: specification and name of "xtal" clock
+	- video interfaces: Device node can contain video interface port
+			    node for HDMI encoder according to [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+	sii8620@39 {
+		reg = <0x39>;
+		compatible = "sil,sii8620";
+		cvcc10-supply = <&ldo36_reg>;
+		iovcc18-supply = <&ldo34_reg>;
+		interrupt-parent = <&gpf0>;
+		interrupts = <2 0>;
+		reset-gpios = <&gpv7 0 0>;
+		clocks = <&pmu_system_controller 0>;
+		clock-names = "xtal";
+
+		port {
+			mhl_to_hdmi: endpoint {
+				remote-endpoint = <&hdmi_to_mhl>;
+			};
+		};
+	};
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 53b872c..cb0d353 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -63,6 +63,9 @@
 DRM Format Handling
 ===================
 
+.. kernel-doc:: include/drm/drm_fourcc.h
+   :internal:
+
 .. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
    :export:
 
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 1ba301c..de3ac9f 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -216,3 +216,9 @@
 userspace are driver specific for efficiency and other reasons these
 interfaces can be rather substantial. Hence every driver has its own
 chapter.
+
+Testing and validation
+======================
+
+.. kernel-doc:: drivers/gpu/drm/drm_debugfs_crc.c
+   :doc: CRC ABI
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 87aaffc..ba83b7d 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -49,6 +49,15 @@
 .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    :internal:
 
+Intel GVT-g Host Support (vGPU device model)
+--------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
+   :doc: Intel GVT-g host support
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
+   :internal:
+
 Display Hardware Handling
 =========================
 
@@ -180,7 +189,7 @@
 DPIO
 ----
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpio_phy.c
    :doc: DPIO
 
 CSR firmware support for DMC
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
index b63a685..269681a 100644
--- a/Documentation/sync_file.txt
+++ b/Documentation/sync_file.txt
@@ -6,7 +6,7 @@
 
 This document serves as a guide for device driver writers on what the
 sync_file API is, and how drivers can support it. Sync file is the carrier of
-the fences(struct fence) that are needed to synchronize between drivers or
+the fences (struct dma_fence) that are needed to synchronize between drivers or
 across process boundaries.
 
 The sync_file API is meant to be used to send and receive fence information
@@ -32,9 +32,9 @@
 Sync files can go either to or from userspace. When a sync_file is sent from
 the driver to userspace we call the fences it contains 'out-fences'. They are
 related to a buffer that the driver is processing or is going to process, so
-the driver creates an out-fence to be able to notify, through fence_signal(),
-when it has finished using (or processing) that buffer. Out-fences are fences
-that the driver creates.
+the driver creates an out-fence to be able to notify, through
+dma_fence_signal(), when it has finished using (or processing) that buffer.
+Out-fences are fences that the driver creates.
 
 On the other hand if the driver receives fence(s) through a sync_file from
 userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
@@ -47,7 +47,7 @@
 When a driver needs to send an out-fence to userspace it creates a sync_file.
 
 Interface:
-	struct sync_file *sync_file_create(struct fence *fence);
+	struct sync_file *sync_file_create(struct dma_fence *fence);
 
 The caller passes the out-fence and gets back the sync_file. That is just the
 first step; next it needs to install an fd on sync_file->file. So it gets an
@@ -72,11 +72,11 @@
 from it.
 
 Interface:
-	struct fence *sync_file_get_fence(int fd);
+	struct dma_fence *sync_file_get_fence(int fd);
 
 
 The returned reference is owned by the caller and must be disposed of
-afterwards using fence_put(). In case of error, a NULL is returned instead.
+afterwards using dma_fence_put(). In case of error, NULL is returned instead.
 
 References:
 [1] struct sync_file in include/linux/sync_file.h
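
For illustration, a minimal sketch of the out-fence flow described above,
assuming a driver-owned and already initialized struct dma_fence *fence
(hypothetical driver code, error paths trimmed):

	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	/* hand the fd to userspace, e.g. through an ioctl argument */
	fd_install(fd, sync_file->file);

And the matching in-fence side, once userspace passes the fd back in:

	struct dma_fence *in_fence = sync_file_get_fence(fd);

	if (!in_fence)
		return -EINVAL;

	/* wait on, or queue behind, the fence; then drop the reference */
	dma_fence_put(in_fence);
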
diff --git a/MAINTAINERS b/MAINTAINERS
index c447953..f354714 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4054,7 +4054,6 @@
 M:	Daniel Vetter <daniel.vetter@intel.com>
 M:	Jani Nikula <jani.nikula@linux.intel.com>
 L:	intel-gfx@lists.freedesktop.org
-L:	dri-devel@lists.freedesktop.org
 W:	https://01.org/linuxgraphics/
 Q:	http://patchwork.freedesktop.org/project/intel-gfx/
 T:	git git://anongit.freedesktop.org/drm-intel
@@ -4064,6 +4063,16 @@
 F:	include/uapi/drm/i915_drm.h
 F:	Documentation/gpu/i915.rst
 
+INTEL GVT-g DRIVERS (Intel GPU Virtualization)
+M:	Zhenyu Wang <zhenyuw@linux.intel.com>
+M:	Zhi Wang <zhi.a.wang@intel.com>
+L:	igvt-g-dev@lists.01.org
+L:	intel-gfx@lists.freedesktop.org
+W:	https://01.org/igvt-g
+T:	git https://github.com/01org/gvt-linux.git
+S:	Supported
+F:	drivers/gpu/drm/i915/gvt/
+
 DRM DRIVERS FOR ATMEL HLCDC
 M:	Boris Brezillon <boris.brezillon@free-electrons.com>
 L:	dri-devel@lists.freedesktop.org
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fdf44ca..37bf25c6 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -248,11 +248,11 @@
 	  APIs extension; the file's descriptor can then be passed on to other
 	  driver.
 
-config FENCE_TRACE
-	bool "Enable verbose FENCE_TRACE messages"
+config DMA_FENCE_TRACE
+	bool "Enable verbose DMA_FENCE_TRACE messages"
 	depends on DMA_SHARED_BUFFER
 	help
-	  Enable the FENCE_TRACE printks. This will add extra
+	  Enable the DMA_FENCE_TRACE printks. This will add extra
 	  spam to the console log, but will make it easier to diagnose
 	  lockup related problems for dma-buffers shared across multiple
 	  devices.
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2585821..ed3b785 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -7,7 +7,7 @@
 	select DMA_SHARED_BUFFER
 	---help---
 	  The Sync File Framework adds explicit synchronization via
-	  userspace. It enables send/receive 'struct fence' objects to/from
+	  userspace. It enables sending and receiving 'struct dma_fence' objects to/from
 	  userspace via Sync File fds for synchronization between drivers via
 	  userspace components. It has been ported from Android.
 
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 210a10b..c33bf88 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,3 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d24..e72e644 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,7 +25,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 	return base + offset;
 }
 
-static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
 	unsigned long flags;
@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	struct dma_buf *dmabuf;
 	struct reservation_object *resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence_excl;
+	struct dma_fence *fence_excl;
 	unsigned long events;
 	unsigned shared_count, seq;
 
@@ -187,20 +187,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & pevents) {
-			if (!fence_get_rcu(fence_excl)) {
+			if (!dma_fence_get_rcu(fence_excl)) {
 				/* force a recheck */
 				events &= ~pevents;
 				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!fence_add_callback(fence_excl, &dcb->cb,
-						       dma_buf_poll_cb)) {
+			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
+							   dma_buf_poll_cb)) {
 				events &= ~pevents;
-				fence_put(fence_excl);
+				dma_fence_put(fence_excl);
 			} else {
 				/*
 				 * No callback queued, wake up any additional
 				 * waiters.
 				 */
-				fence_put(fence_excl);
+				dma_fence_put(fence_excl);
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			}
 		}
@@ -222,9 +222,9 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 			goto out;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
-			if (!fence_get_rcu(fence)) {
+			if (!dma_fence_get_rcu(fence)) {
 				/*
 				 * fence refcount dropped to zero, this means
 				 * that fobj has been freed
@@ -235,13 +235,13 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 				dma_buf_poll_cb(NULL, &dcb->cb);
 				break;
 			}
-			if (!fence_add_callback(fence, &dcb->cb,
-						dma_buf_poll_cb)) {
-				fence_put(fence);
+			if (!dma_fence_add_callback(fence, &dcb->cb,
+						    dma_buf_poll_cb)) {
+				dma_fence_put(fence);
 				events &= ~POLLOUT;
 				break;
 			}
-			fence_put(fence);
+			dma_fence_put(fence);
 		}
 
 		/* No callback queued, wake up any additional waiters. */
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
new file mode 100644
index 0000000..67eb7c8
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -0,0 +1,146 @@
+/*
+ * dma-fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Gustavo Padovan <gustavo@padovan.org>
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-array.h>
+
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
+{
+	return "dma_fence_array";
+}
+
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
+{
+	return "unbound";
+}
+
+static void dma_fence_array_cb_func(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
+{
+	struct dma_fence_array_cb *array_cb =
+		container_of(cb, struct dma_fence_array_cb, cb);
+	struct dma_fence_array *array = array_cb->array;
+
+	if (atomic_dec_and_test(&array->num_pending))
+		dma_fence_signal(&array->base);
+	dma_fence_put(&array->base);
+}
+
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
+{
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+	struct dma_fence_array_cb *cb = (void *)(&array[1]);
+	unsigned i;
+
+	for (i = 0; i < array->num_fences; ++i) {
+		cb[i].array = array;
+		/*
+		 * As we may report that the fence is signaled before all
+		 * callbacks are complete, we need to take an additional
+		 * reference count on the array so that we do not free it too
+		 * early. The core fence handling will only hold the reference
+		 * until we signal the array as complete (but that is now
+		 * insufficient).
+		 */
+		dma_fence_get(&array->base);
+		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+					   dma_fence_array_cb_func)) {
+			dma_fence_put(&array->base);
+			if (atomic_dec_and_test(&array->num_pending))
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static bool dma_fence_array_signaled(struct dma_fence *fence)
+{
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+
+	return atomic_read(&array->num_pending) <= 0;
+}
+
+static void dma_fence_array_release(struct dma_fence *fence)
+{
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+	unsigned i;
+
+	for (i = 0; i < array->num_fences; ++i)
+		dma_fence_put(array->fences[i]);
+
+	kfree(array->fences);
+	dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_array_ops = {
+	.get_driver_name = dma_fence_array_get_driver_name,
+	.get_timeline_name = dma_fence_array_get_timeline_name,
+	.enable_signaling = dma_fence_array_enable_signaling,
+	.signaled = dma_fence_array_signaled,
+	.wait = dma_fence_default_wait,
+	.release = dma_fence_array_release,
+};
+EXPORT_SYMBOL(dma_fence_array_ops);
+
+/**
+ * dma_fence_array_create - Create a custom fence array
+ * @num_fences:		[in]	number of fences to add in the array
+ * @fences:		[in]	array containing the fences
+ * @context:		[in]	fence context to use
+ * @seqno:		[in]	sequence number to use
+ * @signal_on_any:	[in]	signal on any fence in the array
+ *
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and dma_fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+					       struct dma_fence **fences,
+					       u64 context, unsigned seqno,
+					       bool signal_on_any)
+{
+	struct dma_fence_array *array;
+	size_t size = sizeof(*array);
+
+	/* Allocate the callback structures behind the array. */
+	size += num_fences * sizeof(struct dma_fence_array_cb);
+	array = kzalloc(size, GFP_KERNEL);
+	if (!array)
+		return NULL;
+
+	spin_lock_init(&array->lock);
+	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+		       context, seqno);
+
+	array->num_fences = num_fences;
+	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+	array->fences = fences;
+
+	return array;
+}
+EXPORT_SYMBOL(dma_fence_array_create);
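
As a usage sketch (hypothetical caller; fence_a and fence_b stand in for two
already referenced fences): the fences[] array is allocated by the caller, and
ownership of both the array and the fence references passes to the
dma_fence_array on success:

	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	fences[0] = dma_fence_get(fence_a);
	fences[1] = dma_fence_get(fence_b);

	/* signal_on_any == false: signals only when both fences signal */
	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1, false);
	if (!array) {
		dma_fence_put(fences[0]);
		dma_fence_put(fences[1]);
		kfree(fences);
		return -ENOMEM;
	}
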
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/dma-fence.c
similarity index 64%
rename from drivers/dma-buf/fence.c
rename to drivers/dma-buf/dma-fence.c
index 4d51f9e..3a7bf00 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -21,13 +21,13 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/atomic.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
 
-EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
-EXPORT_TRACEPOINT_SYMBOL(fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 
 /*
  * fence context counter: each execution context should have its own
@@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
 
 /**
- * fence_context_alloc - allocate an array of fence contexts
+ * dma_fence_context_alloc - allocate an array of fence contexts
  * @num:	[in]	amount of contexts to allocate
  *
  * This function will return the first index of the number of fences allocated.
  * The fence context is used for setting fence->context to a unique number.
  */
-u64 fence_context_alloc(unsigned num)
+u64 dma_fence_context_alloc(unsigned num)
 {
 	BUG_ON(!num);
-	return atomic64_add_return(num, &fence_context_counter) - num;
+	return atomic64_add_return(num, &dma_fence_context_counter) - num;
 }
-EXPORT_SYMBOL(fence_context_alloc);
+EXPORT_SYMBOL(dma_fence_context_alloc);
 
 /**
- * fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_locked - signal completion of a fence
  * @fence: the fence to signal
  *
  * Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
  * can only go from unsignaled to signaled state, it will only be effective
  * the first time.
  *
- * Unlike fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal, this function must be called with fence->lock held.
  */
-int fence_signal_locked(struct fence *fence)
+int dma_fence_signal_locked(struct dma_fence *fence)
 {
-	struct fence_cb *cur, *tmp;
+	struct dma_fence_cb *cur, *tmp;
 	int ret = 0;
 
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
+	lockdep_assert_held(fence->lock);
+
@@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
 		smp_mb__before_atomic();
 	}
 
-	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;
 
 		/*
-		 * we might have raced with the unlocked fence_signal,
+		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
 	} else
-		trace_fence_signaled(fence);
+		trace_dma_fence_signaled(fence);
 
 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
 	}
 	return ret;
 }
-EXPORT_SYMBOL(fence_signal_locked);
+EXPORT_SYMBOL(dma_fence_signal_locked);
 
 /**
- * fence_signal - signal completion of a fence
+ * dma_fence_signal - signal completion of a fence
  * @fence: the fence to signal
  *
  * Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
  * can only go from unsignaled to signaled state, it will only be effective
  * the first time.
  */
-int fence_signal(struct fence *fence)
+int dma_fence_signal(struct dma_fence *fence)
 {
 	unsigned long flags;
 
@@ -116,13 +118,13 @@ int fence_signal(struct fence *fence)
 		smp_mb__before_atomic();
 	}
 
-	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
 
-	trace_fence_signaled(fence);
+	trace_dma_fence_signaled(fence);
 
-	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
-		struct fence_cb *cur, *tmp;
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+		struct dma_fence_cb *cur, *tmp;
 
 		spin_lock_irqsave(fence->lock, flags);
 		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
@@ -133,10 +135,10 @@ int fence_signal(struct fence *fence)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(fence_signal);
+EXPORT_SYMBOL(dma_fence_signal);
 
 /**
- * fence_wait_timeout - sleep until the fence gets signaled
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
  * or until timeout elapses
  * @fence:	[in]	the fence to wait on
  * @intr:	[in]	if true, do an interruptible wait
@@ -152,7 +154,7 @@ EXPORT_SYMBOL(fence_signal);
  * freed before return, resulting in undefined behavior.
  */
 signed long
-fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
 {
 	signed long ret;
 
@@ -160,70 +162,71 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
 		return -EINVAL;
 
 	if (timeout == 0)
-		return fence_is_signaled(fence);
+		return dma_fence_is_signaled(fence);
 
-	trace_fence_wait_start(fence);
+	trace_dma_fence_wait_start(fence);
 	ret = fence->ops->wait(fence, intr, timeout);
-	trace_fence_wait_end(fence);
+	trace_dma_fence_wait_end(fence);
 	return ret;
 }
-EXPORT_SYMBOL(fence_wait_timeout);
+EXPORT_SYMBOL(dma_fence_wait_timeout);
 
-void fence_release(struct kref *kref)
+void dma_fence_release(struct kref *kref)
 {
-	struct fence *fence =
-			container_of(kref, struct fence, refcount);
+	struct dma_fence *fence =
+		container_of(kref, struct dma_fence, refcount);
 
-	trace_fence_destroy(fence);
+	trace_dma_fence_destroy(fence);
 
 	BUG_ON(!list_empty(&fence->cb_list));
 
 	if (fence->ops->release)
 		fence->ops->release(fence);
 	else
-		fence_free(fence);
+		dma_fence_free(fence);
 }
-EXPORT_SYMBOL(fence_release);
+EXPORT_SYMBOL(dma_fence_release);
 
-void fence_free(struct fence *fence)
+void dma_fence_free(struct dma_fence *fence)
 {
 	kfree_rcu(fence, rcu);
 }
-EXPORT_SYMBOL(fence_free);
+EXPORT_SYMBOL(dma_fence_free);
 
 /**
- * fence_enable_sw_signaling - enable signaling on fence
+ * dma_fence_enable_sw_signaling - enable signaling on fence
  * @fence:	[in]	the fence to enable
  *
  * this will request sw signaling to be enabled, to make the fence
  * complete as soon as possible
  */
-void fence_enable_sw_signaling(struct fence *fence)
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 {
 	unsigned long flags;
 
-	if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
-	    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-		trace_fence_enable_signal(fence);
+	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			      &fence->flags) &&
+	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		trace_dma_fence_enable_signal(fence);
 
 		spin_lock_irqsave(fence->lock, flags);
 
 		if (!fence->ops->enable_signaling(fence))
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 
 		spin_unlock_irqrestore(fence->lock, flags);
 	}
 }
-EXPORT_SYMBOL(fence_enable_sw_signaling);
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
 
 /**
- * fence_add_callback - add a callback to be called when the fence
+ * dma_fence_add_callback - add a callback to be called when the fence
  * is signaled
  * @fence:	[in]	the fence to wait on
  * @cb:		[in]	the callback to register
  * @func:	[in]	the function to call
  *
- * cb will be initialized by fence_add_callback, no initialization
+ * cb will be initialized by dma_fence_add_callback, no initialization
  * by the caller is required. Any number of callbacks can be registered
  * to a fence, but a callback can only be registered to one fence at a time.
  *
@@ -232,15 +235,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
  * *not* call the callback)
  *
  * Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to fence_wait, however the caller doesn't need to
+ * refcount as it does to dma_fence_wait, however the caller doesn't need to
  * keep a refcount to fence afterwards: when software access is enabled,
  * the creator of the fence is required to keep the fence alive until
- * after it signals with fence_signal. The callback itself can be called
+ * after it signals with dma_fence_signal. The callback itself can be called
  * from irq context.
  *
  */
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
-		       fence_func_t func)
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+			   dma_fence_func_t func)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -249,22 +252,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
 	if (WARN_ON(!fence || !func))
 		return -EINVAL;
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		INIT_LIST_HEAD(&cb->node);
 		return -ENOENT;
 	}
 
 	spin_lock_irqsave(fence->lock, flags);
 
-	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				   &fence->flags);
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		ret = -ENOENT;
 	else if (!was_set) {
-		trace_fence_enable_signal(fence);
+		trace_dma_fence_enable_signal(fence);
 
 		if (!fence->ops->enable_signaling(fence)) {
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 			ret = -ENOENT;
 		}
 	}
@@ -278,10 +282,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_add_callback);
+EXPORT_SYMBOL(dma_fence_add_callback);
 
 /**
- * fence_remove_callback - remove a callback from the signaling list
+ * dma_fence_remove_callback - remove a callback from the signaling list
  * @fence:	[in]	the fence to wait on
  * @cb:		[in]	the callback to remove
  *
@@ -296,7 +300,7 @@ EXPORT_SYMBOL(fence_add_callback);
  * with a reference held to the fence.
  */
 bool
-fence_remove_callback(struct fence *fence, struct fence_cb *cb)
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	unsigned long flags;
 	bool ret;
@@ -311,15 +315,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_remove_callback);
+EXPORT_SYMBOL(dma_fence_remove_callback);
 
 struct default_wait_cb {
-	struct fence_cb base;
+	struct dma_fence_cb base;
 	struct task_struct *task;
 };
 
 static void
-fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct default_wait_cb *wait =
 		container_of(cb, struct default_wait_cb, base);
@@ -328,7 +332,7 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
 }
 
 /**
- * fence_default_wait - default sleep until the fence gets signaled
+ * dma_fence_default_wait - default sleep until the fence gets signaled
  * or until timeout elapses
  * @fence:	[in]	the fence to wait on
  * @intr:	[in]	if true, do an interruptible wait
@@ -338,14 +342,14 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
  * remaining timeout in jiffies on success.
  */
 signed long
-fence_default_wait(struct fence *fence, bool intr, signed long timeout)
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
 {
 	struct default_wait_cb cb;
 	unsigned long flags;
 	signed long ret = timeout;
 	bool was_set;
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return timeout;
 
 	spin_lock_irqsave(fence->lock, flags);
@@ -355,25 +359,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
 		goto out;
 	}
 
-	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				   &fence->flags);
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		goto out;
 
 	if (!was_set) {
-		trace_fence_enable_signal(fence);
+		trace_dma_fence_enable_signal(fence);
 
 		if (!fence->ops->enable_signaling(fence)) {
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 			goto out;
 		}
 	}
 
-	cb.base.func = fence_default_wait_cb;
+	cb.base.func = dma_fence_default_wait_cb;
 	cb.task = current;
 	list_add(&cb.base.node, &fence->cb_list);
 
-	while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
 		if (intr)
 			__set_current_state(TASK_INTERRUPTIBLE);
 		else
@@ -395,23 +400,23 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
 	spin_unlock_irqrestore(fence->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL(fence_default_wait);
+EXPORT_SYMBOL(dma_fence_default_wait);
 
 static bool
-fence_test_signaled_any(struct fence **fences, uint32_t count)
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
 {
 	int i;
 
 	for (i = 0; i < count; ++i) {
-		struct fence *fence = fences[i];
-		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		struct dma_fence *fence = fences[i];
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 			return true;
 	}
 	return false;
 }
 
 /**
- * fence_wait_any_timeout - sleep until any fence gets signaled
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
  * or until timeout elapses
  * @fences:	[in]	array of fences to wait on
  * @count:	[in]	number of fences to wait on
@@ -427,8 +432,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
  * fence might be freed before return, resulting in undefined behavior.
  */
 signed long
-fence_wait_any_timeout(struct fence **fences, uint32_t count,
-		       bool intr, signed long timeout)
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+			   bool intr, signed long timeout)
 {
 	struct default_wait_cb *cb;
 	signed long ret = timeout;
@@ -439,7 +444,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 
 	if (timeout == 0) {
 		for (i = 0; i < count; ++i)
-			if (fence_is_signaled(fences[i]))
+			if (dma_fence_is_signaled(fences[i]))
 				return 1;
 
 		return 0;
@@ -452,16 +457,16 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 	}
 
 	for (i = 0; i < count; ++i) {
-		struct fence *fence = fences[i];
+		struct dma_fence *fence = fences[i];
 
-		if (fence->ops->wait != fence_default_wait) {
+		if (fence->ops->wait != dma_fence_default_wait) {
 			ret = -EINVAL;
 			goto fence_rm_cb;
 		}
 
 		cb[i].task = current;
-		if (fence_add_callback(fence, &cb[i].base,
-				       fence_default_wait_cb)) {
+		if (dma_fence_add_callback(fence, &cb[i].base,
+					   dma_fence_default_wait_cb)) {
 			/* This fence is already signaled */
 			goto fence_rm_cb;
 		}
@@ -473,7 +478,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 		else
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (fence_test_signaled_any(fences, count))
+		if (dma_fence_test_signaled_any(fences, count))
 			break;
 
 		ret = schedule_timeout(ret);
@@ -486,34 +491,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 
 fence_rm_cb:
 	while (i-- > 0)
-		fence_remove_callback(fences[i], &cb[i].base);
+		dma_fence_remove_callback(fences[i], &cb[i].base);
 
 err_free_cb:
 	kfree(cb);
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_wait_any_timeout);
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
 
 /**
- * fence_init - Initialize a custom fence.
+ * dma_fence_init - Initialize a custom fence.
  * @fence:	[in]	the fence to initialize
- * @ops:	[in]	the fence_ops for operations on this fence
+ * @ops:	[in]	the dma_fence_ops for operations on this fence
  * @lock:	[in]	the irqsafe spinlock to use for locking this fence
  * @context:	[in]	the execution context this fence is run on
  * @seqno:	[in]	a linear increasing sequence number for this context
  *
  * Initializes an allocated fence, the caller doesn't have to keep its
  * refcount after committing with this fence, but it will need to hold a
- * refcount again if fence_ops.enable_signaling gets called. This can
+ * refcount again if dma_fence_ops.enable_signaling gets called. This can
  * be used for implementing other types of fence.
  *
  * context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using fence_later.
+ * to check which fence is later by simply using dma_fence_later.
  */
 void
-fence_init(struct fence *fence, const struct fence_ops *ops,
-	     spinlock_t *lock, u64 context, unsigned seqno)
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+	       spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@@ -527,6 +532,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
 	fence->seqno = seqno;
 	fence->flags = 0UL;
 
-	trace_fence_init(fence);
+	trace_dma_fence_init(fence);
 }
-EXPORT_SYMBOL(fence_init);
+EXPORT_SYMBOL(dma_fence_init);
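
To show the renamed entry points together, a minimal provider sketch
(hypothetical; everything except the dma_fence_* API is made up).
dma_fence_init() insists on .wait, .enable_signaling, .get_driver_name and
.get_timeline_name being set:

	static DEFINE_SPINLOCK(my_fence_lock);

	static const char *my_get_driver_name(struct dma_fence *f)
	{
		return "my_driver";
	}

	static const char *my_get_timeline_name(struct dma_fence *f)
	{
		return "my_timeline";
	}

	static bool my_enable_signaling(struct dma_fence *f)
	{
		return true;	/* the irq handler will signal us later */
	}

	static const struct dma_fence_ops my_fence_ops = {
		.get_driver_name = my_get_driver_name,
		.get_timeline_name = my_get_timeline_name,
		.enable_signaling = my_enable_signaling,
		.wait = dma_fence_default_wait,
	};

	/* in the submission path: */
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;

	dma_fence_init(f, &my_fence_ops, &my_fence_lock,
		       dma_fence_context_alloc(1), 1);

	/* later, typically from the irq handler: dma_fence_signal(f); */
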
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
deleted file mode 100644
index f1989fc..0000000
--- a/drivers/dma-buf/fence-array.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * fence-array: aggregate fences to be waited together
- *
- * Copyright (C) 2016 Collabora Ltd
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
- * Authors:
- *	Gustavo Padovan <gustavo@padovan.org>
- *	Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/fence-array.h>
-
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
-
-static const char *fence_array_get_driver_name(struct fence *fence)
-{
-	return "fence_array";
-}
-
-static const char *fence_array_get_timeline_name(struct fence *fence)
-{
-	return "unbound";
-}
-
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
-{
-	struct fence_array_cb *array_cb =
-		container_of(cb, struct fence_array_cb, cb);
-	struct fence_array *array = array_cb->array;
-
-	if (atomic_dec_and_test(&array->num_pending))
-		fence_signal(&array->base);
-	fence_put(&array->base);
-}
-
-static bool fence_array_enable_signaling(struct fence *fence)
-{
-	struct fence_array *array = to_fence_array(fence);
-	struct fence_array_cb *cb = (void *)(&array[1]);
-	unsigned i;
-
-	for (i = 0; i < array->num_fences; ++i) {
-		cb[i].array = array;
-		/*
-		 * As we may report that the fence is signaled before all
-		 * callbacks are complete, we need to take an additional
-		 * reference count on the array so that we do not free it too
-		 * early. The core fence handling will only hold the reference
-		 * until we signal the array as complete (but that is now
-		 * insufficient).
-		 */
-		fence_get(&array->base);
-		if (fence_add_callback(array->fences[i], &cb[i].cb,
-				       fence_array_cb_func)) {
-			fence_put(&array->base);
-			if (atomic_dec_and_test(&array->num_pending))
-				return false;
-		}
-	}
-
-	return true;
-}
-
-static bool fence_array_signaled(struct fence *fence)
-{
-	struct fence_array *array = to_fence_array(fence);
-
-	return atomic_read(&array->num_pending) <= 0;
-}
-
-static void fence_array_release(struct fence *fence)
-{
-	struct fence_array *array = to_fence_array(fence);
-	unsigned i;
-
-	for (i = 0; i < array->num_fences; ++i)
-		fence_put(array->fences[i]);
-
-	kfree(array->fences);
-	fence_free(fence);
-}
-
-const struct fence_ops fence_array_ops = {
-	.get_driver_name = fence_array_get_driver_name,
-	.get_timeline_name = fence_array_get_timeline_name,
-	.enable_signaling = fence_array_enable_signaling,
-	.signaled = fence_array_signaled,
-	.wait = fence_default_wait,
-	.release = fence_array_release,
-};
-EXPORT_SYMBOL(fence_array_ops);
-
-/**
- * fence_array_create - Create a custom fence array
- * @num_fences:		[in]	number of fences to add in the array
- * @fences:		[in]	array containing the fences
- * @context:		[in]	fence context to use
- * @seqno:		[in]	sequence number to use
- * @signal_on_any:	[in]	signal on any fence in the array
- *
- * Allocate a fence_array object and initialize the base fence with fence_init().
- * In case of error it returns NULL.
- *
- * The caller should allocate the fences array with num_fences size
- * and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and fence_put() is used on each fence on release.
- *
- * If @signal_on_any is true the fence array signals if any fence in the array
- * signals, otherwise it signals when all fences in the array signal.
- */
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
-				       u64 context, unsigned seqno,
-				       bool signal_on_any)
-{
-	struct fence_array *array;
-	size_t size = sizeof(*array);
-
-	/* Allocate the callback structures behind the array. */
-	size += num_fences * sizeof(struct fence_array_cb);
-	array = kzalloc(size, GFP_KERNEL);
-	if (!array)
-		return NULL;
-
-	spin_lock_init(&array->lock);
-	fence_init(&array->base, &fence_array_ops, &array->lock,
-		   context, seqno);
-
-	array->num_fences = num_fences;
-	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
-	array->fences = fences;
-
-	return array;
-}
-EXPORT_SYMBOL(fence_array_create);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 723d8af..7ed56f3 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
 static void
 reservation_object_add_shared_inplace(struct reservation_object *obj,
 				      struct reservation_object_list *fobj,
-				      struct fence *fence)
+				      struct dma_fence *fence)
 {
 	u32 i;
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	preempt_disable();
 	write_seqcount_begin(&obj->seq);
 
 	for (i = 0; i < fobj->shared_count; ++i) {
-		struct fence *old_fence;
+		struct dma_fence *old_fence;
 
 		old_fence = rcu_dereference_protected(fobj->shared[i],
 						reservation_object_held(obj));
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 			write_seqcount_end(&obj->seq);
 			preempt_enable();
 
-			fence_put(old_fence);
+			dma_fence_put(old_fence);
 			return;
 		}
 	}
@@ -143,12 +143,12 @@ static void
 reservation_object_add_shared_replace(struct reservation_object *obj,
 				      struct reservation_object_list *old,
 				      struct reservation_object_list *fobj,
-				      struct fence *fence)
+				      struct dma_fence *fence)
 {
 	unsigned i;
-	struct fence *old_fence = NULL;
+	struct dma_fence *old_fence = NULL;
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	if (!old) {
 		RCU_INIT_POINTER(fobj->shared[0], fence);
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 	fobj->shared_count = old->shared_count;
 
 	for (i = 0; i < old->shared_count; ++i) {
-		struct fence *check;
+		struct dma_fence *check;
 
 		check = rcu_dereference_protected(old->shared[i],
 						reservation_object_held(obj));
@@ -196,7 +196,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 		kfree_rcu(old, rcu);
 
 	if (old_fence)
-		fence_put(old_fence);
+		dma_fence_put(old_fence);
 }
 
 /**
@@ -208,7 +208,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
  * reservation_object_reserve_shared() has been called.
  */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct fence *fence)
+					 struct dma_fence *fence)
 {
 	struct reservation_object_list *old, *fobj = obj->staged;
 
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
  * Add a fence to the exclusive slot.  The obj->lock must be held.
  */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct fence *fence)
+				       struct dma_fence *fence)
 {
-	struct fence *old_fence = reservation_object_get_excl(obj);
+	struct dma_fence *old_fence = reservation_object_get_excl(obj);
 	struct reservation_object_list *old;
 	u32 i = 0;
 
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 		i = old->shared_count;
 
 	if (fence)
-		fence_get(fence);
+		dma_fence_get(fence);
 
 	preempt_disable();
 	write_seqcount_begin(&obj->seq);
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 
 	/* inplace update, no shared fences */
 	while (i--)
-		fence_put(rcu_dereference_protected(old->shared[i],
+		dma_fence_put(rcu_dereference_protected(old->shared[i],
 						reservation_object_held(obj)));
 
 	if (old_fence)
-		fence_put(old_fence);
+		dma_fence_put(old_fence);
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
@@ -276,26 +276,32 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
  * Zero or -errno
  */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct fence **pfence_excl,
+				      struct dma_fence **pfence_excl,
 				      unsigned *pshared_count,
-				      struct fence ***pshared)
+				      struct dma_fence ***pshared)
 {
-	unsigned shared_count = 0;
-	unsigned retry = 1;
-	struct fence **shared = NULL, *fence_excl = NULL;
-	int ret = 0;
+	struct dma_fence **shared = NULL;
+	struct dma_fence *fence_excl;
+	unsigned int shared_count;
+	int ret = 1;
 
-	while (retry) {
+	do {
 		struct reservation_object_list *fobj;
 		unsigned seq;
+		unsigned int i;
 
-		seq = read_seqcount_begin(&obj->seq);
+		shared_count = i = 0;
 
 		rcu_read_lock();
+		seq = read_seqcount_begin(&obj->seq);
+
+		fence_excl = rcu_dereference(obj->fence_excl);
+		if (fence_excl && !dma_fence_get_rcu(fence_excl))
+			goto unlock;
 
 		fobj = rcu_dereference(obj->fence);
 		if (fobj) {
-			struct fence **nshared;
+			struct dma_fence **nshared;
 			size_t sz = sizeof(*shared) * fobj->shared_max;
 
 			nshared = krealloc(shared, sz,
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				}
 
 				ret = -ENOMEM;
-				shared_count = 0;
 				break;
 			}
 			shared = nshared;
-			memcpy(shared, fobj->shared, sz);
 			shared_count = fobj->shared_count;
-		} else
-			shared_count = 0;
-		fence_excl = rcu_dereference(obj->fence_excl);
-
-		retry = read_seqcount_retry(&obj->seq, seq);
-		if (retry)
-			goto unlock;
-
-		if (!fence_excl || fence_get_rcu(fence_excl)) {
-			unsigned i;
 
 			for (i = 0; i < shared_count; ++i) {
-				if (fence_get_rcu(shared[i]))
-					continue;
-
-				/* uh oh, refcount failed, abort and retry */
-				while (i--)
-					fence_put(shared[i]);
-
-				if (fence_excl) {
-					fence_put(fence_excl);
-					fence_excl = NULL;
-				}
-
-				retry = 1;
-				break;
+				shared[i] = rcu_dereference(fobj->shared[i]);
+				if (!dma_fence_get_rcu(shared[i]))
+					break;
 			}
-		} else
-			retry = 1;
+		}
 
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+			while (i--)
+				dma_fence_put(shared[i]);
+			dma_fence_put(fence_excl);
+			goto unlock;
+		}
+
+		ret = 0;
 unlock:
 		rcu_read_unlock();
-	}
-	*pshared_count = shared_count;
-	if (shared_count)
-		*pshared = shared;
-	else {
-		*pshared = NULL;
+	} while (ret);
+
+	if (!shared_count) {
 		kfree(shared);
+		shared = NULL;
 	}
+
+	*pshared_count = shared_count;
+	*pshared = shared;
 	*pfence_excl = fence_excl;
 
 	return ret;
@@ -377,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 bool wait_all, bool intr,
 					 unsigned long timeout)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned seq, shared_count, i = 0;
 	long ret = timeout;
 
@@ -397,20 +388,18 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *lfence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
 
-			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &lfence->flags))
 				continue;
 
-			if (!fence_get_rcu(lfence))
+			if (!dma_fence_get_rcu(lfence))
 				goto unlock_retry;
 
-			if (fence_is_signaled(lfence)) {
-				fence_put(lfence);
+			if (dma_fence_is_signaled(lfence)) {
+				dma_fence_put(lfence);
 				continue;
 			}
 
@@ -420,18 +409,16 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 	}
 
 	if (!shared_count) {
-		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
+		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
 
 		if (fence_excl &&
-		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
-			if (!fence_get_rcu(fence_excl))
+		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			      &fence_excl->flags)) {
+			if (!dma_fence_get_rcu(fence_excl))
 				goto unlock_retry;
 
-			if (fence_is_signaled(fence_excl))
-				fence_put(fence_excl);
+			if (dma_fence_is_signaled(fence_excl))
+				dma_fence_put(fence_excl);
 			else
 				fence = fence_excl;
 		}
@@ -439,8 +426,13 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 
 	rcu_read_unlock();
 	if (fence) {
-		ret = fence_wait_timeout(fence, intr, ret);
-		fence_put(fence);
+		if (read_seqcount_retry(&obj->seq, seq)) {
+			dma_fence_put(fence);
+			goto retry;
+		}
+
+		ret = dma_fence_wait_timeout(fence, intr, ret);
+		dma_fence_put(fence);
 		if (ret > 0 && wait_all && (i + 1 < shared_count))
 			goto retry;
 	}
@@ -454,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
 
 
 static inline int
-reservation_object_test_signaled_single(struct fence *passed_fence)
+reservation_object_test_signaled_single(struct dma_fence *passed_fence)
 {
-	struct fence *fence, *lfence = passed_fence;
+	struct dma_fence *fence, *lfence = passed_fence;
 	int ret = 1;
 
-	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-		fence = fence_get_rcu(lfence);
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+		fence = dma_fence_get_rcu(lfence);
 		if (!fence)
 			return -1;
 
-		ret = !!fence_is_signaled(fence);
-		fence_put(fence);
+		ret = !!dma_fence_is_signaled(fence);
+		dma_fence_put(fence);
 	}
 	return ret;
 }
@@ -484,12 +476,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 					  bool test_all)
 {
 	unsigned seq, shared_count;
-	int ret = true;
+	int ret;
 
+	rcu_read_lock();
 retry:
+	ret = true;
 	shared_count = 0;
 	seq = read_seqcount_begin(&obj->seq);
-	rcu_read_lock();
 
 	if (test_all) {
 		unsigned i;
@@ -500,46 +493,35 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
 			ret = reservation_object_test_signaled_single(fence);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
 			else if (!ret)
 				break;
 		}
 
-		/*
-		 * There could be a read_seqcount_retry here, but nothing cares
-		 * about whether it's the old or newer fence pointers that are
-		 * signaled. That race could still have happened after checking
-		 * read_seqcount_retry. If you care, use ww_mutex_lock.
-		 */
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto retry;
 	}
 
 	if (!shared_count) {
-		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
+		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
 
 		if (fence_excl) {
 			ret = reservation_object_test_signaled_single(
 								fence_excl);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
+
+			if (read_seqcount_retry(&obj->seq, seq))
+				goto retry;
 		}
 	}
 
 	rcu_read_unlock();
 	return ret;
-
-unlock_retry:
-	rcu_read_unlock();
-	goto retry;
 }
 EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
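
A consumer-side sketch of the RCU snapshot helper above (hypothetical caller,
assuming obj points at a valid reservation_object): the helper returns one
reference per fence, which the caller must drop:

	struct dma_fence *excl;
	struct dma_fence **shared;
	unsigned int count, i;
	int ret;

	ret = reservation_object_get_fences_rcu(obj, &excl, &count, &shared);
	if (ret)
		return ret;

	/* inspect or wait on the fences here, then release everything */
	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);
	kfree(shared);
	dma_fence_put(excl);	/* dma_fence_put() ignores NULL */
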
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
index 71127f8..f47112a 100644
--- a/drivers/dma-buf/seqno-fence.c
+++ b/drivers/dma-buf/seqno-fence.c
@@ -21,35 +21,35 @@
 #include <linux/export.h>
 #include <linux/seqno-fence.h>
 
-static const char *seqno_fence_get_driver_name(struct fence *fence)
+static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
 {
 	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
 
 	return seqno_fence->ops->get_driver_name(fence);
 }
 
-static const char *seqno_fence_get_timeline_name(struct fence *fence)
+static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
 
 	return seqno_fence->ops->get_timeline_name(fence);
 }
 
-static bool seqno_enable_signaling(struct fence *fence)
+static bool seqno_enable_signaling(struct dma_fence *fence)
 {
 	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
 
 	return seqno_fence->ops->enable_signaling(fence);
 }
 
-static bool seqno_signaled(struct fence *fence)
+static bool seqno_signaled(struct dma_fence *fence)
 {
 	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
 
 	return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
 }
 
-static void seqno_release(struct fence *fence)
+static void seqno_release(struct dma_fence *fence)
 {
 	struct seqno_fence *f = to_seqno_fence(fence);
 
@@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence)
 	if (f->ops->release)
 		f->ops->release(fence);
 	else
-		fence_free(&f->base);
+		dma_fence_free(&f->base);
 }
 
-static signed long seqno_wait(struct fence *fence, bool intr,
-				signed long timeout)
+static signed long seqno_wait(struct dma_fence *fence, bool intr,
+			      signed long timeout)
 {
 	struct seqno_fence *f = to_seqno_fence(fence);
 
 	return f->ops->wait(fence, intr, timeout);
 }
 
-const struct fence_ops seqno_fence_ops = {
+const struct dma_fence_ops seqno_fence_ops = {
 	.get_driver_name = seqno_fence_get_driver_name,
 	.get_timeline_name = seqno_fence_get_timeline_name,
 	.enable_signaling = seqno_enable_signaling,
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 62e8e6d..82e0ca4 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data {
 
 #define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
 
-static const struct fence_ops timeline_fence_ops;
+static const struct dma_fence_ops timeline_fence_ops;
 
-static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
 {
 	if (fence->ops != &timeline_fence_ops)
 		return NULL;
@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name)
 		return NULL;
 
 	kref_init(&obj->kref);
-	obj->context = fence_context_alloc(1);
+	obj->context = dma_fence_context_alloc(1);
 	strlcpy(obj->name, name, sizeof(obj->name));
 
 	INIT_LIST_HEAD(&obj->child_list_head);
@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
 
 	list_for_each_entry_safe(pt, next, &obj->active_list_head,
 				 active_list) {
-		if (fence_is_signaled_locked(&pt->base))
+		if (dma_fence_is_signaled_locked(&pt->base))
 			list_del_init(&pt->active_list);
 	}
 
@@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
 
 	spin_lock_irqsave(&obj->child_list_lock, flags);
 	sync_timeline_get(obj);
-	fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
-		   obj->context, value);
+	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+		       obj->context, value);
 	list_add_tail(&pt->child_list, &obj->child_list_head);
 	INIT_LIST_HEAD(&pt->active_list);
 	spin_unlock_irqrestore(&obj->child_list_lock, flags);
 	return pt;
 }
 
-static const char *timeline_fence_get_driver_name(struct fence *fence)
+static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "sw_sync";
 }
 
-static const char *timeline_fence_get_timeline_name(struct fence *fence)
+static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
 {
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
 	return parent->name;
 }
 
-static void timeline_fence_release(struct fence *fence)
+static void timeline_fence_release(struct dma_fence *fence)
 {
-	struct sync_pt *pt = fence_to_sync_pt(fence);
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 	unsigned long flags;
 
 	spin_lock_irqsave(fence->lock, flags);
@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence)
 	spin_unlock_irqrestore(fence->lock, flags);
 
 	sync_timeline_put(parent);
-	fence_free(fence);
+	dma_fence_free(fence);
 }
 
-static bool timeline_fence_signaled(struct fence *fence)
+static bool timeline_fence_signaled(struct dma_fence *fence)
 {
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
 	return (fence->seqno > parent->value) ? false : true;
 }
 
-static bool timeline_fence_enable_signaling(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct dma_fence *fence)
 {
-	struct sync_pt *pt = fence_to_sync_pt(fence);
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
 	if (timeline_fence_signaled(fence))
 		return false;
@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence)
 	return true;
 }
 
-static void timeline_fence_value_str(struct fence *fence,
+static void timeline_fence_value_str(struct dma_fence *fence,
 				    char *str, int size)
 {
 	snprintf(str, size, "%d", fence->seqno);
 }
 
-static void timeline_fence_timeline_value_str(struct fence *fence,
+static void timeline_fence_timeline_value_str(struct dma_fence *fence,
 					     char *str, int size)
 {
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
 	snprintf(str, size, "%d", parent->value);
 }
 
-static const struct fence_ops timeline_fence_ops = {
+static const struct dma_fence_ops timeline_fence_ops = {
 	.get_driver_name = timeline_fence_get_driver_name,
 	.get_timeline_name = timeline_fence_get_timeline_name,
 	.enable_signaling = timeline_fence_enable_signaling,
 	.signaled = timeline_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = timeline_fence_release,
 	.fence_value_str = timeline_fence_value_str,
 	.timeline_value_str = timeline_fence_timeline_value_str,
@@ -317,7 +317,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
 
 	sync_file = sync_file_create(&pt->base);
 	if (!sync_file) {
-		fence_put(&pt->base);
+		dma_fence_put(&pt->base);
 		err = -ENOMEM;
 		goto err;
 	}
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 2dd4c3d..48b20e3 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -71,12 +71,13 @@ static const char *sync_status_str(int status)
 	return "error";
 }
 
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+static void sync_print_fence(struct seq_file *s,
+			     struct dma_fence *fence, bool show)
 {
 	int status = 1;
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
-	if (fence_is_signaled_locked(fence))
+	if (dma_fence_is_signaled_locked(fence))
 		status = fence->status;
 
 	seq_printf(s, "  %s%sfence %s",
@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s,
 	int i;
 
 	seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
-		   sync_status_str(!fence_is_signaled(sync_file->fence)));
+		   sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
 
-	if (fence_is_array(sync_file->fence)) {
-		struct fence_array *array = to_fence_array(sync_file->fence);
+	if (dma_fence_is_array(sync_file->fence)) {
+		struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
 
 		for (i = 0; i < array->num_fences; ++i)
 			sync_print_fence(s, array->fences[i], true);
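
The debug dump distinguishes a plain fence from a merged sync_file via dma_fence_is_array(), the renamed counterpart of fence_is_array(). The same walk is common in drivers; a self-contained, purely illustrative sketch (not part of this patch):

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Apply cb() to every component of a possibly-merged fence. */
static void for_each_component_fence(struct dma_fence *fence,
				     void (*cb)(struct dma_fence *))
{
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; ++i)
			cb(array->fences[i]);
	} else {
		cb(fence);
	}
}
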
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index d269aa6..26fe8b9 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -15,7 +15,7 @@
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include <linux/sync_file.h>
 #include <uapi/linux/sync_file.h>
@@ -45,10 +45,9 @@ struct sync_timeline {
 	struct list_head	sync_timeline_list;
 };
 
-static inline struct sync_timeline *fence_parent(struct fence *fence)
+static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
 {
-	return container_of(fence->lock, struct sync_timeline,
-			    child_list_lock);
+	return container_of(fence->lock, struct sync_timeline, child_list_lock);
 }
 
 /**
@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
  * @active_list: sync timeline active child's list
  */
 struct sync_pt {
-	struct fence base;
+	struct dma_fence base;
 	struct list_head child_list;
 	struct list_head active_list;
 };
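
dma_fence_parent() works because every sync_pt fence is initialized with the timeline's child_list_lock as its lock; container_of() then maps the lock pointer back to the enclosing sync_timeline. The same embedding trick recovers the sync_pt from the fence, since base is the first member of struct sync_pt. An illustrative sketch of how a dma_fence_to_sync_pt() helper can be implemented (assumption, not quoted from this patch):

static inline struct sync_pt *to_sync_pt(struct dma_fence *fence)
{
	return container_of(fence, struct sync_pt, base);
}
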
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index b29a9e8..69d8ef9 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -54,7 +54,7 @@ static struct sync_file *sync_file_alloc(void)
 	return NULL;
 }
 
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct sync_file *sync_file;
 
@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
- * takes ownership of @fence. The sync_file can be released with
+ * takes its own reference of @fence. The sync_file can be released with
  * fput(sync_file->file). Returns the sync_file or NULL in case of error.
  */
-struct sync_file *sync_file_create(struct fence *fence)
+struct sync_file *sync_file_create(struct dma_fence *fence)
 {
 	struct sync_file *sync_file;
 
@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
 	if (!sync_file)
 		return NULL;
 
-	sync_file->fence = fence;
+	sync_file->fence = dma_fence_get(fence);
 
 	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
 		 fence->ops->get_driver_name(fence),
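
Note the one behavioural change buried in this rename: sync_file_create() now grabs its own reference on @fence (the dma_fence_get() above) instead of consuming the caller's, so a caller that has no further use for the fence must now drop its own reference. A hedged sketch of the resulting calling convention (illustrative only):

	struct sync_file *sync_file;

	sync_file = sync_file_create(fence);	/* takes its own reference */
	dma_fence_put(fence);			/* drop the caller's reference */
	if (!sync_file)
		return -ENOMEM;
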
@@ -121,16 +121,16 @@ static struct sync_file *sync_file_fdget(int fd)
  * Ensures @fd references a valid sync_file and returns a fence that
  * represents all fence in the sync_file. On error NULL is returned.
  */
-struct fence *sync_file_get_fence(int fd)
+struct dma_fence *sync_file_get_fence(int fd)
 {
 	struct sync_file *sync_file;
-	struct fence *fence;
+	struct dma_fence *fence;
 
 	sync_file = sync_file_fdget(fd);
 	if (!sync_file)
 		return NULL;
 
-	fence = fence_get(sync_file->fence);
+	fence = dma_fence_get(sync_file->fence);
 	fput(sync_file->file);
 
 	return fence;
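
sync_file_get_fence() is the in-kernel import path: it resolves a sync_file fd to a referenced dma_fence. A hedged sketch of how a driver might consume an "in-fence" fd handed in from userspace (my_wait_in_fence() is hypothetical):

#include <linux/dma-fence.h>
#include <linux/sync_file.h>

static long my_wait_in_fence(int fd)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	long ret;

	if (!fence)
		return -EINVAL;

	ret = dma_fence_wait(fence, true);	/* interruptible */
	dma_fence_put(fence);			/* we own the returned reference */
	return ret;				/* 0 on success, negative errno */
}
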
@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd)
 EXPORT_SYMBOL(sync_file_get_fence);
 
 static int sync_file_set_fence(struct sync_file *sync_file,
-			       struct fence **fences, int num_fences)
+			       struct dma_fence **fences, int num_fences)
 {
-	struct fence_array *array;
+	struct dma_fence_array *array;
 
 	/*
 	 * The reference for the fences in the new sync_file and held
 	 * in add_fence() during the merge procedure, so for num_fences == 1
 	 * we already own a new reference to the fence. For num_fence > 1
-	 * we own the reference of the fence_array creation.
+	 * we own the reference of the dma_fence_array creation.
 	 */
 	if (num_fences == 1) {
 		sync_file->fence = fences[0];
 		kfree(fences);
 	} else {
-		array = fence_array_create(num_fences, fences,
-					   fence_context_alloc(1), 1, false);
+		array = dma_fence_array_create(num_fences, fences,
+					       dma_fence_context_alloc(1),
+					       1, false);
 		if (!array)
 			return -ENOMEM;
 
@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file,
 	return 0;
 }
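
For num_fences > 1 the merged fences are wrapped in a dma_fence_array, and the sync_file then owns the array's single reference. The same pattern, as a standalone illustrative sketch (not part of this patch):

#include <linux/dma-fence-array.h>

/* Bundle n already-referenced fences into one; the array takes over
 * both the fences[] allocation and the references on its entries. */
static struct dma_fence *my_bundle(struct dma_fence **fences, int n)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(n, fences,
				       dma_fence_context_alloc(1), 1,
				       false /* signal when all signal */);
	if (!array)
		return NULL;

	return &array->base;
}
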
 
-static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+static struct dma_fence **get_fences(struct sync_file *sync_file,
+				     int *num_fences)
 {
-	if (fence_is_array(sync_file->fence)) {
-		struct fence_array *array = to_fence_array(sync_file->fence);
+	if (dma_fence_is_array(sync_file->fence)) {
+		struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
 
 		*num_fences = array->num_fences;
 		return array->fences;
@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
 	return &sync_file->fence;
 }
 
-static void add_fence(struct fence **fences, int *i, struct fence *fence)
+static void add_fence(struct dma_fence **fences,
+		      int *i, struct dma_fence *fence)
 {
 	fences[*i] = fence;
 
-	if (!fence_is_signaled(fence)) {
-		fence_get(fence);
+	if (!dma_fence_is_signaled(fence)) {
+		dma_fence_get(fence);
 		(*i)++;
 	}
 }
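
add_fence() only takes a reference (and keeps the slot) for unsignaled fences, which is how the merge path below quietly drops already-signaled input. From userspace the whole path is driven by SYNC_IOC_MERGE; a hedged user-side sketch, assuming the sync_file UAPI as present in this series:

#include <linux/sync_file.h>
#include <string.h>
#include <sys/ioctl.h>

/* Merge two sync_file fds; returns the new merged fd or -1 on error. */
int sync_merge(const char *name, int fd1, int fd2)
{
	struct sync_merge_data data;

	memset(&data, 0, sizeof(data));
	data.fd2 = fd2;
	strncpy(data.name, name, sizeof(data.name) - 1);

	if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
		return -1;
	return data.fence;
}
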
@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 					 struct sync_file *b)
 {
 	struct sync_file *sync_file;
-	struct fence **fences, **nfences, **a_fences, **b_fences;
+	struct dma_fence **fences, **nfences, **a_fences, **b_fences;
 	int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
 	sync_file = sync_file_alloc();
@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 	 * and sync_file_create, this is a reasonable assumption.
 	 */
 	for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
-		struct fence *pt_a = a_fences[i_a];
-		struct fence *pt_b = b_fences[i_b];
+		struct dma_fence *pt_a = a_fences[i_a];
+		struct dma_fence *pt_b = b_fences[i_b];
 
 		if (pt_a->context < pt_b->context) {
 			add_fence(fences, &i, pt_a);
@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 		add_fence(fences, &i, b_fences[i_b]);
 
 	if (i == 0)
-		fences[i++] = fence_get(a_fences[0]);
+		fences[i++] = dma_fence_get(a_fences[0]);
 
 	if (num_fences > i) {
 		nfences = krealloc(fences, i * sizeof(*fences),
@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref)
 						     kref);
 
 	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
-		fence_remove_callback(sync_file->fence, &sync_file->cb);
-	fence_put(sync_file->fence);
+		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
+	dma_fence_put(sync_file->fence);
 	kfree(sync_file);
 }
 
@@ -307,12 +310,12 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
 	if (!poll_does_not_wait(wait) &&
 	    !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
-		if (fence_add_callback(sync_file->fence, &sync_file->cb,
-				       fence_check_cb_func) < 0)
+		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
+					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
 	}
 
-	return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+	return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
 }
 
 static long sync_file_ioctl_merge(struct sync_file *sync_file,
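
poll() arms the callback at most once via the POLL_ENABLED bit; if dma_fence_add_callback() returns an error the fence is already signaled and the waitqueue is woken immediately. The same add-callback idiom in a driver, as an illustrative sketch (my_* names are hypothetical):

#include <linux/completion.h>
#include <linux/dma-fence.h>

struct my_waiter {
	struct dma_fence_cb cb;
	struct completion done;
};

static void my_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct my_waiter *w = container_of(cb, struct my_waiter, cb);

	complete(&w->done);
}

static void my_wait_via_cb(struct dma_fence *fence)
{
	struct my_waiter w;

	init_completion(&w.done);
	if (dma_fence_add_callback(fence, &w.cb, my_fence_cb))
		return;	/* already signaled */
	wait_for_completion(&w.done);
}
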
@@ -370,14 +373,14 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
 	return err;
 }
 
-static void sync_fill_fence_info(struct fence *fence,
+static void sync_fill_fence_info(struct dma_fence *fence,
 				 struct sync_fence_info *info)
 {
 	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
 		sizeof(info->obj_name));
 	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
 		sizeof(info->driver_name));
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		info->status = fence->status >= 0 ? 1 : fence->status;
 	else
 		info->status = 0;
@@ -389,7 +392,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 {
 	struct sync_file_info info;
 	struct sync_fence_info *fence_info = NULL;
-	struct fence **fences;
+	struct dma_fence **fences;
 	__u32 size;
 	int num_fences, ret, i;
 
@@ -429,7 +432,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 no_fences:
 	strlcpy(info.name, sync_file->name, sizeof(info.name));
-	info.status = fence_is_signaled(sync_file->fence);
+	info.status = dma_fence_is_signaled(sync_file->fence);
 	info.num_fences = num_fences;
 
 	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25c7204..74579d2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,7 @@
 		drm_scatter.o drm_pci.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
-		drm_info.o drm_debugfs.o drm_encoder_slave.o \
+		drm_info.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_prime.o \
 		drm_rect.o drm_vma_manager.o drm_flip_work.o \
 		drm_modeset_lock.o drm_atomic.o drm_bridge.o \
@@ -23,6 +23,7 @@
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
 drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 248a05d..41bd2bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-	amdgpu_gtt_mgr.o
+	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index b8d6667..0619269 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,7 +90,6 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3        0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK          0x27
-#define ENCODER_OBJECT_ID_VIRTUAL                 0x28
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
 
@@ -120,7 +119,6 @@
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL               0x17
 
 /* deleted */
 
@@ -149,7 +147,6 @@
 #define GRAPH_OBJECT_ENUM_ID5                     0x05
 #define GRAPH_OBJECT_ENUM_ID6                     0x06
 #define GRAPH_OBJECT_ENUM_ID7                     0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL                 0x08
 
 /****************************************************/
 /* Graphics Object ID Bit definition                */
@@ -411,10 +408,6 @@
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
 
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-                                                  GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
-                                                  ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e..2ec7b3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
 #include <linux/kref.h>
 #include <linux/interval_tree.h>
 #include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include <ttm/ttm_bo_api.h>
 #include <ttm/ttm_bo_driver.h>
@@ -53,7 +53,11 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
 #include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
 #include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu;
 extern int amdgpu_sclk_deep_sleep_en;
 extern char *amdgpu_virtual_display;
 extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
 #define AMDGPUFB_CONN_LIMIT			4
 #define AMDGPU_BIOS_NUM_SCRATCH			8
 
-/* max number of rings */
-#define AMDGPU_MAX_RINGS			16
-#define AMDGPU_MAX_GFX_RINGS			1
-#define AMDGPU_MAX_COMPUTE_RINGS		8
-#define AMDGPU_MAX_VCE_RINGS			3
-
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES		2
 
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
 
 struct amdgpu_device;
 struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 bool amdgpu_is_idle(struct amdgpu_device *adev,
 		    enum amd_ip_block_type block_type);
 
+#define AMDGPU_MAX_IP_NUM 16
+
+struct amdgpu_ip_block_status {
+	bool valid;
+	bool sw;
+	bool hw;
+	bool late_initialized;
+	bool hang;
+};
+
 struct amdgpu_ip_block_version {
-	enum amd_ip_block_type type;
-	u32 major;
-	u32 minor;
-	u32 rev;
+	const enum amd_ip_block_type type;
+	const u32 major;
+	const u32 minor;
+	const u32 rev;
 	const struct amd_ip_funcs *funcs;
 };
 
+struct amdgpu_ip_block {
+	struct amdgpu_ip_block_status status;
+	const struct amdgpu_ip_block_version *version;
+};
+
 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 				enum amd_ip_block_type type,
 				u32 major, u32 minor);
 
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
-					struct amdgpu_device *adev,
-					enum amd_ip_block_type type);
+struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
+					    enum amd_ip_block_type type);
+
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+			const struct amdgpu_ip_block_version *ip_block_version);
 
 /* provided by hw blocks that can move/clear data.  e.g., gfx or sdma */
 struct amdgpu_buffer_funcs {
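
Together with the ip_blocks[] swap further down (struct amdgpu_device now embeds struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM] rather than a bare version pointer plus a parallel status array), IP blocks are now registered at early-init time through amdgpu_ip_block_add(). A hedged sketch of a per-ASIC registration path; vi_common_ip_block is assumed from the companion ASIC patches:

static int my_asic_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_ip_block_add(adev, &vi_common_ip_block);
	if (r)
		return r;

	return amdgpu_ip_block_add(adev, &acp_ip_block);
}
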
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
 	void (*set_rptr)(struct amdgpu_device *adev);
 };
 
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
-	/* ring read/write ptr handling */
-	u32 (*get_rptr)(struct amdgpu_ring *ring);
-	u32 (*get_wptr)(struct amdgpu_ring *ring);
-	void (*set_wptr)(struct amdgpu_ring *ring);
-	/* validating and patching of IBs */
-	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-	/* command emit functions */
-	void (*emit_ib)(struct amdgpu_ring *ring,
-			struct amdgpu_ib *ib,
-			unsigned vm_id, bool ctx_switch);
-	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
-			   uint64_t seq, unsigned flags);
-	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
-	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
-			      uint64_t pd_addr);
-	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
-	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
-	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
-				uint32_t gds_base, uint32_t gds_size,
-				uint32_t gws_base, uint32_t gws_size,
-				uint32_t oa_base, uint32_t oa_size);
-	/* testing functions */
-	int (*test_ring)(struct amdgpu_ring *ring);
-	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
-	/* insert NOP packets */
-	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
-	/* pad the indirect buffer to the necessary number of dw */
-	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
-	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
-	/* note usage for clock and power gating */
-	void (*begin_use)(struct amdgpu_ring *ring);
-	void (*end_use)(struct amdgpu_ring *ring);
-	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
-	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
-	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
-	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
 /*
  * BIOS.
  */
@@ -364,47 +337,6 @@ struct amdgpu_clock {
 };
 
 /*
- * Fences.
- */
-struct amdgpu_fence_driver {
-	uint64_t			gpu_addr;
-	volatile uint32_t		*cpu_addr;
-	/* sync_seq is protected by ring emission lock */
-	uint32_t			sync_seq;
-	atomic_t			last_seq;
-	bool				initialized;
-	struct amdgpu_irq_src		*irq_src;
-	unsigned			irq_type;
-	struct timer_list		fallback_timer;
-	unsigned			num_fences_mask;
-	spinlock_t			lock;
-	struct fence			**fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
-#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-				  unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
-				   struct amdgpu_irq_src *irq_src,
-				   unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
-/*
  * BO.
  */
 struct amdgpu_bo_list_entry {
@@ -427,7 +359,7 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	struct fence		        *last_pt_update;
+	struct dma_fence	        *last_pt_update;
 	unsigned			ref_count;
 
 	/* protected by vm mutex and spinlock */
@@ -464,7 +396,6 @@ struct amdgpu_bo {
 	 */
 	struct list_head		va;
 	/* Constant after initialization */
-	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
 	struct amdgpu_bo		*shadow;
@@ -543,7 +474,7 @@ struct amdgpu_sa_bo {
 	struct amdgpu_sa_manager	*manager;
 	unsigned			soffset;
 	unsigned			eoffset;
-	struct fence		        *fence;
+	struct dma_fence	        *fence;
 };
 
 /*
@@ -561,27 +492,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 			  struct drm_device *dev,
 			  uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
-	DECLARE_HASHTABLE(fences, 4);
-	struct fence	        *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
-		     void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
-				     struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
@@ -703,10 +613,10 @@ struct amdgpu_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_abo;
-	struct fence			*excl;
+	struct dma_fence		*excl;
 	unsigned			shared_count;
-	struct fence			**shared;
-	struct fence_cb			cb;
+	struct dma_fence		**shared;
+	struct dma_fence_cb		cb;
 	bool				async;
 };
 
@@ -723,14 +633,6 @@ struct amdgpu_ib {
 	uint32_t			flags;
 };
 
-enum amdgpu_ring_type {
-	AMDGPU_RING_TYPE_GFX,
-	AMDGPU_RING_TYPE_COMPUTE,
-	AMDGPU_RING_TYPE_SDMA,
-	AMDGPU_RING_TYPE_UVD,
-	AMDGPU_RING_TYPE_VCE
-};
-
 extern const struct amd_sched_backend_ops amdgpu_sched_ops;
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -742,214 +644,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
-		      struct fence **f);
-
-struct amdgpu_ring {
-	struct amdgpu_device		*adev;
-	const struct amdgpu_ring_funcs	*funcs;
-	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler	sched;
-
-	struct amdgpu_bo	*ring_obj;
-	volatile uint32_t	*ring;
-	unsigned		rptr_offs;
-	unsigned		wptr;
-	unsigned		wptr_old;
-	unsigned		ring_size;
-	unsigned		max_dw;
-	int			count_dw;
-	uint64_t		gpu_addr;
-	uint32_t		align_mask;
-	uint32_t		ptr_mask;
-	bool			ready;
-	u32			nop;
-	u32			idx;
-	u32			me;
-	u32			pipe;
-	u32			queue;
-	struct amdgpu_bo	*mqd_obj;
-	u32			doorbell_index;
-	bool			use_doorbell;
-	unsigned		wptr_offs;
-	unsigned		fence_offs;
-	uint64_t		current_ctx;
-	enum amdgpu_ring_type	type;
-	char			name[16];
-	unsigned		cond_exe_offs;
-	u64			cond_exe_gpu_addr;
-	volatile u32		*cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
-	struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM	16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID	(1 << 0)
-#define AMDGPU_PTE_SYSTEM	(1 << 1)
-#define AMDGPU_PTE_SNOOPED	(1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE	(1 << 4)
-
-#define AMDGPU_PTE_READABLE	(1 << 5)
-#define AMDGPU_PTE_WRITEABLE	(1 << 6)
-
-#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER	0
-#define AMDGPU_VM_FAULT_STOP_FIRST	1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
-
-struct amdgpu_vm_pt {
-	struct amdgpu_bo_list_entry	entry;
-	uint64_t			addr;
-	uint64_t			shadow_addr;
-};
-
-struct amdgpu_vm {
-	/* tree of virtual addresses mapped */
-	struct rb_root		va;
-
-	/* protecting invalidated */
-	spinlock_t		status_lock;
-
-	/* BOs moved, but not yet updated in the PT */
-	struct list_head	invalidated;
-
-	/* BOs cleared in the PT because of a move */
-	struct list_head	cleared;
-
-	/* BO mappings freed, but not yet updated in the PT */
-	struct list_head	freed;
-
-	/* contains the page directory */
-	struct amdgpu_bo	*page_directory;
-	unsigned		max_pde_used;
-	struct fence		*page_directory_fence;
-	uint64_t		last_eviction_counter;
-
-	/* array of page tables, one for each page directory entry */
-	struct amdgpu_vm_pt	*page_tables;
-
-	/* for id and flush management per ring */
-	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
-
-	/* protecting freed */
-	spinlock_t		freed_lock;
-
-	/* Scheduler entity for page table updates */
-	struct amd_sched_entity	entity;
-
-	/* client id */
-	u64                     client_id;
-};
-
-struct amdgpu_vm_id {
-	struct list_head	list;
-	struct fence		*first;
-	struct amdgpu_sync	active;
-	struct fence		*last_flush;
-	atomic64_t		owner;
-
-	uint64_t		pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
-
-	uint32_t                current_gpu_reset_count;
-
-	uint32_t		gds_base;
-	uint32_t		gds_size;
-	uint32_t		gws_base;
-	uint32_t		gws_size;
-	uint32_t		oa_base;
-	uint32_t		oa_size;
-};
-
-struct amdgpu_vm_manager {
-	/* Handling of VMIDs */
-	struct mutex				lock;
-	unsigned				num_ids;
-	struct list_head			ids_lru;
-	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
-
-	/* Handling of VM fences */
-	u64					fence_context;
-	unsigned				seqno[AMDGPU_MAX_RINGS];
-
-	uint32_t				max_pfn;
-	/* vram base address for page table entry  */
-	u64					vram_base_offset;
-	/* is vm enabled? */
-	bool					enabled;
-	/* vm pte handling */
-	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
-	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
-	unsigned				vm_pte_num_rings;
-	atomic_t				vm_pte_next_ring;
-	/* client id counter */
-	atomic64_t				client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-			 struct list_head *validated,
-			 struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-				  struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
-		      struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			     struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
+		      struct dma_fence **f);
 
 /*
  * context related structures
@@ -957,7 +652,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 struct amdgpu_ctx_ring {
 	uint64_t		sequence;
-	struct fence		**fences;
+	struct dma_fence	**fences;
 	struct amd_sched_entity	entity;
 };
 
@@ -966,7 +661,7 @@ struct amdgpu_ctx {
 	struct amdgpu_device    *adev;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
-	struct fence            **fences;
+	struct dma_fence	**fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 	bool preamble_presented;
 };
@@ -982,8 +677,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+			      struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -1093,6 +788,16 @@ struct amdgpu_scratch {
 /*
  * GFX configurations
  */
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+struct amdgpu_rb_config {
+	uint32_t rb_backend_disable;
+	uint32_t user_rb_backend_disable;
+	uint32_t raster_config;
+	uint32_t raster_config_1;
+};
+
 struct amdgpu_gca_config {
 	unsigned max_shader_engines;
 	unsigned max_tile_pipes;
@@ -1121,6 +826,8 @@ struct amdgpu_gca_config {
 
 	uint32_t tile_mode_array[32];
 	uint32_t macrotile_mode_array[16];
+
+	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
 };
 
 struct amdgpu_cu_info {
@@ -1133,6 +840,7 @@ struct amdgpu_gfx_funcs {
 	/* get the gpu clock counter */
 	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
 	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
 };
 
 struct amdgpu_gfx {
@@ -1181,23 +889,13 @@ struct amdgpu_gfx {
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
-		    struct fence *f);
+		    struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ib, struct fence *last_vm_update,
-		       struct amdgpu_job *job, struct fence **f);
+		       struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+		       struct amdgpu_job *job, struct dma_fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned ring_size, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
-		     enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
 
 /*
  * CS.
@@ -1225,7 +923,7 @@ struct amdgpu_cs_parser {
 	struct amdgpu_bo_list		*bo_list;
 	struct amdgpu_bo_list_entry	vm_pd;
 	struct list_head		validated;
-	struct fence			*fence;
+	struct dma_fence		*fence;
 	uint64_t			bytes_moved_threshold;
 	uint64_t			bytes_moved;
 	struct amdgpu_bo_list_entry	*evictable;
@@ -1245,7 +943,7 @@ struct amdgpu_job {
 	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
-	struct fence		*fence; /* the hw fence */
+	struct dma_fence	*fence; /* the hw fence */
 	uint32_t		preamble_status;
 	uint32_t		num_ibs;
 	void			*owner;
@@ -1294,354 +992,6 @@ struct amdgpu_wb {
 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
 
-
-
-enum amdgpu_int_thermal_type {
-	THERMAL_TYPE_NONE,
-	THERMAL_TYPE_EXTERNAL,
-	THERMAL_TYPE_EXTERNAL_GPIO,
-	THERMAL_TYPE_RV6XX,
-	THERMAL_TYPE_RV770,
-	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
-	THERMAL_TYPE_EVERGREEN,
-	THERMAL_TYPE_SUMO,
-	THERMAL_TYPE_NI,
-	THERMAL_TYPE_SI,
-	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
-	THERMAL_TYPE_CI,
-	THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
-	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
-	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
-	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
-	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
-	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
-	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
-	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
-	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
-	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
-	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
-	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
-	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
-	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
-	u32 caps; /* vbios flags */
-	u32 class; /* vbios flags */
-	u32 class2; /* vbios flags */
-	/* UVD clocks */
-	u32 vclk;
-	u32 dclk;
-	/* VCE clocks */
-	u32 evclk;
-	u32 ecclk;
-	bool vce_active;
-	enum amdgpu_vce_level vce_level;
-	/* asic priv */
-	void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
-	/* thermal interrupt work */
-	struct work_struct work;
-	/* low temperature threshold */
-	int                min_temp;
-	/* high temperature threshold */
-	int                max_temp;
-	/* was last interrupt low to high or high to low */
-	bool               high_to_low;
-	/* interrupt source */
-	struct amdgpu_irq_src	irq;
-};
-
-enum amdgpu_clk_action
-{
-	AMDGPU_SCLK_UP = 1,
-	AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
-	u32 sclk;
-	u32 mclk;
-	enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
-	u32 sclk;
-	u32 mclk;
-	u16 vddc;
-	u16 vddci;
-};
-
-struct amdgpu_clock_array {
-	u32 count;
-	u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
-	u32 clk;
-	u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
-	u32 count;
-	struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
-	struct {
-		u16 vddc;
-		u32 leakage;
-	};
-	struct {
-		u16 vddc1;
-		u16 vddc2;
-		u16 vddc3;
-	};
-};
-
-struct amdgpu_cac_leakage_table {
-	u32 count;
-	union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
-	u16 voltage;
-	u32 sclk;
-	u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
-	u32 count;
-	struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
-	u32 vclk;
-	u32 dclk;
-	u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
-	u8 count;
-	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
-	u32 ecclk;
-	u32 evclk;
-	u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
-	u8 count;
-	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
-	u8 ppm_design;
-	u16 cpu_core_number;
-	u32 platform_tdp;
-	u32 small_ac_platform_tdp;
-	u32 platform_tdc;
-	u32 small_ac_platform_tdc;
-	u32 apu_tdp;
-	u32 dgpu_tdp;
-	u32 dgpu_ulv_power;
-	u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
-	u16 tdp;
-	u16 configurable_tdp;
-	u16 tdc;
-	u16 battery_power_limit;
-	u16 small_power_limit;
-	u16 low_cac_leakage;
-	u16 high_cac_leakage;
-	u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
-	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
-	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
-	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
-	struct amdgpu_clock_array valid_sclk_values;
-	struct amdgpu_clock_array valid_mclk_values;
-	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
-	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
-	u32 mclk_sclk_ratio;
-	u32 sclk_mclk_delta;
-	u16 vddc_vddci_delta;
-	u16 min_vddc_for_pcie_gen2;
-	struct amdgpu_cac_leakage_table cac_leakage_table;
-	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
-	struct amdgpu_ppm_table *ppm_table;
-	struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
-	u16 t_min;
-	u16 t_med;
-	u16 t_high;
-	u16 pwm_min;
-	u16 pwm_med;
-	u16 pwm_high;
-	u8 t_hyst;
-	u32 cycle_delay;
-	u16 t_max;
-	u8 control_mode;
-	u16 default_max_fan_pwm;
-	u16 default_fan_output_sensitivity;
-	u16 fan_output_sensitivity;
-	bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
-	AMDGPU_PCIE_GEN1 = 0,
-	AMDGPU_PCIE_GEN2 = 1,
-	AMDGPU_PCIE_GEN3 = 2,
-	AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
-	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
-	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
-	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
-	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
-	/* vce clocks */
-	u32 evclk;
-	u32 ecclk;
-	/* gpu clocks */
-	u32 sclk;
-	u32 mclk;
-	u8 clk_idx;
-	u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
-	int (*get_temperature)(struct amdgpu_device *adev);
-	int (*pre_set_power_state)(struct amdgpu_device *adev);
-	int (*set_power_state)(struct amdgpu_device *adev);
-	void (*post_set_power_state)(struct amdgpu_device *adev);
-	void (*display_configuration_changed)(struct amdgpu_device *adev);
-	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
-	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
-	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
-	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
-	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
-	bool (*vblank_too_short)(struct amdgpu_device *adev);
-	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
-	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
-	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
-	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
-	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
-	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
-	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
-	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
-	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
-	int (*get_sclk_od)(struct amdgpu_device *adev);
-	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
-	int (*get_mclk_od)(struct amdgpu_device *adev);
-	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
-	struct amdgpu_ps        *ps;
-	/* number of valid power states */
-	int                     num_ps;
-	/* current power state that is active */
-	struct amdgpu_ps        *current_ps;
-	/* requested power state */
-	struct amdgpu_ps        *requested_ps;
-	/* boot up power state */
-	struct amdgpu_ps        *boot_ps;
-	/* default uvd power state */
-	struct amdgpu_ps        *uvd_ps;
-	/* vce requirements */
-	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
-	enum amdgpu_vce_level vce_level;
-	enum amd_pm_state_type state;
-	enum amd_pm_state_type user_state;
-	u32                     platform_caps;
-	u32                     voltage_response_time;
-	u32                     backbias_response_time;
-	void                    *priv;
-	u32			new_active_crtcs;
-	int			new_active_crtc_count;
-	u32			current_active_crtcs;
-	int			current_active_crtc_count;
-	struct amdgpu_dpm_dynamic_state dyn_state;
-	struct amdgpu_dpm_fan fan;
-	u32 tdp_limit;
-	u32 near_tdp_limit;
-	u32 near_tdp_limit_adjusted;
-	u32 sq_ramping_threshold;
-	u32 cac_leakage;
-	u16 tdp_od_limit;
-	u32 tdp_adjustment;
-	u16 load_line_slope;
-	bool power_control;
-	bool ac_power;
-	/* special states active */
-	bool                    thermal_active;
-	bool                    uvd_active;
-	bool                    vce_active;
-	/* thermal handling */
-	struct amdgpu_dpm_thermal thermal;
-	/* forced levels */
-	enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
-	struct mutex		mutex;
-	u32                     current_sclk;
-	u32                     current_mclk;
-	u32                     default_sclk;
-	u32                     default_mclk;
-	struct amdgpu_i2c_chan *i2c_bus;
-	/* internal thermal controller on rv6xx+ */
-	enum amdgpu_int_thermal_type int_thermal_type;
-	struct device	        *int_hwmon_dev;
-	/* fan control parameters */
-	bool                    no_fan;
-	u8                      fan_pulses_per_revolution;
-	u8                      fan_min_rpm;
-	u8                      fan_max_rpm;
-	/* dpm */
-	bool                    dpm_enabled;
-	bool                    sysfs_initialized;
-	struct amdgpu_dpm       dpm;
-	const struct firmware	*fw;	/* SMC firmware */
-	uint32_t                fw_version;
-	const struct amdgpu_dpm_funcs *funcs;
-	uint32_t                pcie_gen_mask;
-	uint32_t                pcie_mlw_mask;
-	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
 void amdgpu_get_pcie_info(struct amdgpu_device *adev);
 
 /*
@@ -1939,14 +1289,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
-struct amdgpu_ip_block_status {
-	bool valid;
-	bool sw;
-	bool hw;
-	bool late_initialized;
-	bool hang;
-};
-
 struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
@@ -2102,9 +1444,8 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds		gds;
 
-	const struct amdgpu_ip_block_version *ip_blocks;
+	struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
 	int				num_ip_blocks;
-	struct amdgpu_ip_block_status	*ip_block_status;
 	struct mutex	mn_lock;
 	DECLARE_HASHTABLE(mn_hash, 7);
 
@@ -2127,6 +1468,11 @@ struct amdgpu_device {
 
 };
 
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+	return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
 bool amdgpu_device_is_px(struct drm_device *dev);
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       struct drm_device *ddev,
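
The new amdgpu_ttm_adev() helper replaces the struct amdgpu_bo::adev backpointer removed earlier in this diff: the device is recovered from the ttm_bo_device embedded in amdgpu_device::mman. A hedged sketch of the lookup from a buffer object (illustrative):

/* bo->bdev points into amdgpu_device::mman, so the container_of()
 * inside amdgpu_ttm_adev() recovers the owning device. */
static struct amdgpu_device *my_bo_to_adev(struct ttm_buffer_object *bo)
{
	return amdgpu_ttm_adev(bo->bdev);
}
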
@@ -2278,8 +1624,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2301,108 +1645,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
-		-EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
-	      (adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
-	      (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
-	      (adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-	      (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-	      (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l)  \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
-	      (adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
-	      (adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
-	      (adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
-	      (adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
-	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
-	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
-	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
-	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
-	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
-	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
-		(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
-	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
-	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
-	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
-	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output)		\
-	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
 #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
 
 /* Common functions */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60f..2f9f96c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	const struct amdgpu_ip_block_version *ip_version =
+	const struct amdgpu_ip_block *ip_block =
 		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
 
-	if (!ip_version)
+	if (!ip_block)
 		return -EINVAL;
 
 	r = amd_acp_hw_init(adev->acp.cgs_device,
-			    ip_version->major, ip_version->minor);
+			    ip_block->version->major, ip_block->version->minor);
 	/* -ENODEV means board uses AZ rather than ACP */
 	if (r == -ENODEV)
 		return 0;
@@ -456,7 +456,7 @@ static int acp_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
 	.name = "acp_ip",
 	.early_init = acp_early_init,
 	.late_init = NULL,
@@ -472,3 +472,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
 	.set_clockgating_state = acp_set_clockgating_state,
 	.set_powergating_state = acp_set_powergating_state,
 };
+
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_ACP,
+	.major = 2,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &acp_ip_funcs,
+};
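
Making acp_ip_funcs static and exporting a complete amdgpu_ip_block_version instead fits the new registration scheme: the ASIC code adds the whole versioned block (type, major.minor.rev, funcs) in one call, and acp_hw_init() above reads the version back through ip_block->version. A hedged sketch of the registration site (illustrative; error handling trimmed):

	r = amdgpu_ip_block_add(adev, &acp_ip_block);
	if (r)
		return r;
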
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 8a39631..a288ce2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -37,6 +37,6 @@ struct amdgpu_acp {
 	struct acp_pm_domain *acp_genpd;
 };
 
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
 
 #endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf54..56a86dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 	return 0;
 }
 
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
-	GET_ENGINE_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-	return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
-	GET_MEMORY_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-	return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-				      uint32_t eng_clock)
-{
-	SET_ENGINE_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
-	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-				      uint32_t mem_clock)
-{
-	SET_MEMORY_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
-	if (adev->flags & AMD_IS_APU)
-		return;
-
-	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 					     u32 eng_clock, u32 mem_clock)
 {
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
 	return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
 }
 
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-				 u16 voltage_level,
-				 u8 voltage_type)
-{
-	union set_voltage args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-	u8 frev, crev, volt_index = voltage_level;
-
-	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	/* 0xff01 is a flag rather then an actual voltage */
-	if (voltage_level == 0xff01)
-		return;
-
-	switch (crev) {
-	case 1:
-		args.v1.ucVoltageType = voltage_type;
-		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
-		args.v1.ucVoltageIndex = volt_index;
-		break;
-	case 2:
-		args.v2.ucVoltageType = voltage_type;
-		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
-		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
-		break;
-	case 3:
-		args.v3.ucVoltageType = voltage_type;
-		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
-		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
-		break;
-	default:
-		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-		return;
-	}
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
 int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
 					      u16 *leakage_id)
 {
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
 		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
 }
 
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+					      bool hung)
+{
+	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+	if (hung)
+		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+	else
+		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+	WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
 /* Atom needs data in little endian format
  * so swap as appropriate when copying data to
  * or from atom. Note that atom operates on
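
amdgpu_atombios_scratch_regs_engine_hung() toggles ATOM_S3_ASIC_GUI_ENGINE_HUNG in BIOS scratch register 3 so the VBIOS can tell a GPU reset is in flight. The expected bracketing in a reset path, as a hedged sketch (my_asic_reset() is hypothetical):

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
	r = my_asic_reset(adev);
	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
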
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 1735615..70e9ace 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 					    bool strobe_mode,
 					    struct atom_mpll_param *mpll_param);
 
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-				      uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-				      uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-				 u16 voltage_level,
-				 u8 voltage_type);
-
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 					     u32 eng_clock, u32 mem_clock);
 
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
 void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+					      bool hung);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
 int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 3453052..cc97eee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	int i, r;
 
 	start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 				       false);
 		if (r)
 			goto exit_do_move;
-		r = fence_wait(fence, false);
+		r = dma_fence_wait(fence, false);
 		if (r)
 			goto exit_do_move;
-		fence_put(fence);
+		dma_fence_put(fence);
 	}
 	end_jiffies = jiffies;
 	r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
 exit_do_move:
 	if (fence)
-		fence_put(fence);
+		dma_fence_put(fence);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a8bfa3..017556c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 	switch(type) {
 	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
 	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
-		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
 		if (max_offset > adev->mc.real_vram_size)
 			return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 		break;
 	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
 	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
-		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
 		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 			place.fpfn =
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
 	r = amdgpu_bo_reserve(obj, false);
 	if (unlikely(r != 0))
 		return r;
-	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+	r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
 				     min_offset, max_offset, mcaddr);
 	amdgpu_bo_unreserve(obj);
 	return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 	int i, r = -1;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state(
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
 								(void *)adev,
 									state);
 			break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 	int i, r = -1;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_powergating_state(
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
 								(void *)adev,
 									state);
 			break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 		result = AMDGPU_UCODE_ID_CP_MEC1;
 		break;
 	case CGS_UCODE_ID_CP_MEC_JT2:
-		if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
-		  || adev->asic_type == CHIP_POLARIS10)
-			result = AMDGPU_UCODE_ID_CP_MEC2;
-		else
+		/* For VI, JT2 should be the same as JT1, because:
+		 * 1) MEC2 and MEC1 use exactly the same FW.
+		 * 2) JT2 is not patched but JT1 is.
+		 */
+		if (adev->asic_type >= CHIP_TOPAZ)
 			result = AMDGPU_UCODE_ID_CP_MEC1;
+		else
+			result = AMDGPU_UCODE_ID_CP_MEC2;
 		break;
 	case CGS_UCODE_ID_RLC_G:
 		result = AMDGPU_UCODE_ID_RLC_G;
 		break;
+	case CGS_UCODE_ID_STORAGE:
+		result = AMDGPU_UCODE_ID_STORAGE;
+		break;
 	default:
 		DRM_ERROR("Firmware type not supported\n");
 	}
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
 		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
 		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
-			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
 			data_size = le32_to_cpu(header->jt_size) << 2;
 		}
-		info->mc_addr = gpu_addr;
+
+		info->kptr = ucode->kaddr;
 		info->image_size = data_size;
+		info->mc_addr = gpu_addr;
 		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+
+		if (CGS_UCODE_ID_CP_MEC == type)
+			info->image_size = (header->jt_offset) << 2;
+
 		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
 		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 	} else {
@@ -851,6 +865,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	return 0;
 }
 
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_sriov_vf(adev);
+}
+
 static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
 					struct cgs_system_info *sys_info)
 {
@@ -1204,6 +1224,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_notify_dpm_enabled,
 	amdgpu_cgs_call_acpi_method,
 	amdgpu_cgs_query_system_info,
+	amdgpu_cgs_is_virtualization_enabled
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index e3281d4..3af8ffb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
 	.force = amdgpu_connector_dvi_force,
 };
 
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
-	int enc_id = connector->encoder_ids[0];
-	struct drm_encoder *encoder;
-	int i;
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		if (connector->encoder_ids[i] == 0)
-			break;
-
-		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
-		if (!encoder)
-			continue;
-
-		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
-			return encoder;
-	}
-
-	/* pick the first one */
-	if (enc_id)
-		return drm_encoder_find(connector->dev, enc_id);
-	return NULL;
-}
-
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
-	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-
-	if (encoder) {
-		amdgpu_connector_add_common_modes(encoder, connector);
-	}
-
-	return 0;
-}
-
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
-					   struct drm_display_mode *mode)
-{
-	return MODE_OK;
-}
-
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
-	return 0;
-}
-
-static enum drm_connector_status
-
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
-	return connector_status_connected;
-}
-
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
-				  struct drm_property *property,
-				  uint64_t val)
-{
-	return 0;
-}
-
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
-	return;
-}
-
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
-	.get_modes = amdgpu_connector_virtual_get_modes,
-	.mode_valid = amdgpu_connector_virtual_mode_valid,
-	.best_encoder = amdgpu_connector_virtual_encoder,
-};
-
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
-	.dpms = amdgpu_connector_virtual_dpms,
-	.detect = amdgpu_connector_virtual_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.set_property = amdgpu_connector_virtual_set_property,
-	.destroy = amdgpu_connector_destroy,
-	.force = amdgpu_connector_virtual_force,
-};
-
 void
 amdgpu_connector_add(struct amdgpu_device *adev,
 		      uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			connector->interlace_allowed = false;
 			connector->doublescan_allowed = false;
 			break;
-		case DRM_MODE_CONNECTOR_VIRTUAL:
-			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
-			if (!amdgpu_dig_connector)
-				goto failed;
-			amdgpu_connector->con_priv = amdgpu_dig_connector;
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
-			subpixel_order = SubPixelHorizontalRGB;
-			connector->interlace_allowed = false;
-			connector->doublescan_allowed = false;
-			break;
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b0f6e69..a024217 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
 static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 				 struct amdgpu_bo *bo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	u64 initial_bytes_moved;
 	uint32_t domain;
 	int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
-	initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 		initial_bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -387,9 +388,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 
 /* Last resort, try to evict something from the current working set */
 static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
-				struct amdgpu_bo_list_entry *lobj)
+				struct amdgpu_bo *validated)
 {
-	uint32_t domain = lobj->robj->allowed_domains;
+	uint32_t domain = validated->allowed_domains;
 	int r;
 
 	if (!p->evictable)
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
 		struct amdgpu_bo_list_entry *candidate = p->evictable;
 		struct amdgpu_bo *bo = candidate->robj;
+		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 		u64 initial_bytes_moved;
 		uint32_t other;
 
 		/* If we reached our current BO we can forget it */
-		if (candidate == lobj)
+		if (candidate->robj == validated)
 			break;
 
 		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
 		/* Good we can try to move this BO somewhere else */
 		amdgpu_ttm_placement_from_domain(bo, other);
-		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 			initial_bytes_moved;
 
 		if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 	return false;
 }
 
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+	struct amdgpu_cs_parser *p = param;
+	int r;
+
+	do {
+		r = amdgpu_cs_bo_validate(p, bo);
+	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+	if (r)
+		return r;
+
+	if (bo->shadow)
+		r = amdgpu_cs_bo_validate(p, bo);
+
+	return r;
+}
+
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			    struct list_head *validated)
 {
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		if (p->evictable == lobj)
 			p->evictable = NULL;
 
-		do {
-			r = amdgpu_cs_bo_validate(p, bo);
-		} while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+		r = amdgpu_cs_validate(p, bo);
 		if (r)
 			return r;
 
-		if (bo->shadow) {
-			r = amdgpu_cs_bo_validate(p, bo);
-			if (r)
-				return r;
-		}
-
 		if (binding_userptr) {
 			drm_free_large(lobj->user_pages);
 			lobj->user_pages = NULL;
@@ -593,14 +604,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		list_splice(&need_pages, &p->validated);
 	}
 
-	amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
-
 	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 	p->bytes_moved = 0;
 	p->evictable = list_last_entry(&p->validated,
 				       struct amdgpu_bo_list_entry,
 				       tv.head);
 
+	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+				      amdgpu_cs_validate, p);
+	if (r) {
+		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+		goto error_validate;
+	}
+
 	r = amdgpu_cs_list_validate(p, &duplicates);
 	if (r) {
 		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
@@ -719,7 +735,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
-	fence_put(parser->fence);
+	dma_fence_put(parser->fence);
 
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
@@ -756,7 +772,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
-			struct fence *f;
+			struct dma_fence *f;
 
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
@@ -806,13 +822,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 
 	/* Only for UVD/VCE VM emulation */
 	if (ring->funcs->parse_cs) {
-		p->job->vm = NULL;
 		for (i = 0; i < p->job->num_ibs; i++) {
 			r = amdgpu_ring_parse_cs(ring, p, i);
 			if (r)
 				return r;
 		}
-	} else {
+	}
+
+	if (p->job->vm) {
 		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 
 		r = amdgpu_bo_vm_update_pte(p, vm);
@@ -901,7 +918,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
 			kptr += chunk_ib->va_start - offset;
 
-			r =  amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
+			r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
 			if (r) {
 				DRM_ERROR("Failed to get ib !\n");
 				return r;
@@ -916,9 +933,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 				return r;
 			}
 
-			ib->gpu_addr = chunk_ib->va_start;
 		}
 
+		ib->gpu_addr = chunk_ib->va_start;
 		ib->length_dw = chunk_ib->ib_bytes / 4;
 		ib->flags = chunk_ib->flags;
 		j++;
@@ -926,8 +943,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
 	/* UVD & VCE fw doesn't support user fences */
 	if (parser->job->uf_addr && (
-	    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
-	    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 		return -EINVAL;
 
 	return 0;
@@ -956,7 +973,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 		for (j = 0; j < num_deps; ++j) {
 			struct amdgpu_ring *ring;
 			struct amdgpu_ctx *ctx;
-			struct fence *fence;
+			struct dma_fence *fence;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -978,7 +995,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			} else if (fence) {
 				r = amdgpu_sync_fence(adev, &p->job->sync,
 						      fence);
-				fence_put(fence);
+				dma_fence_put(fence);
 				amdgpu_ctx_put(ctx);
 				if (r)
 					return r;
@@ -1008,7 +1025,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	job->owner = p->filp;
 	job->fence_ctx = entity->fence_context;
-	p->fence = fence_get(&job->base.s_fence->finished);
+	p->fence = dma_fence_get(&job->base.s_fence->finished);
 	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
 	job->uf_sequence = cs->out.handle;
 	amdgpu_job_free_resources(job);
@@ -1091,7 +1108,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
-	struct fence *fence;
+	struct dma_fence *fence;
 	long r;
 
 	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1107,8 +1124,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(fence))
 		r = PTR_ERR(fence);
 	else if (fence) {
-		r = fence_wait_timeout(fence, true, timeout);
-		fence_put(fence);
+		r = dma_fence_wait_timeout(fence, true, timeout);
+		dma_fence_put(fence);
 	} else
 		r = 1;
 
@@ -1195,6 +1212,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
 		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 		if (unlikely(r))
 			return r;
+
+		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+			continue;
+
+		bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+		amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		if (unlikely(r))
+			return r;
 	}
 
 	return 0;
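The hunks above factor the validate/evict retry loop out of amdgpu_cs_list_validate() into the new amdgpu_cs_validate() callback, so the same logic can also be applied to page-table BOs through amdgpu_vm_validate_pt_bos(). Reduced to a sketch, with validate() and try_evict() standing in for amdgpu_cs_bo_validate() and amdgpu_cs_try_evict():

	/* Keep validating while the only failure is lack of memory and
	 * evicting something from the current working set still makes
	 * progress; a shadow BO is validated right after its parent.
	 */
	do {
		r = validate(p, bo);
	} while (r == -ENOMEM && try_evict(p, bo));
	if (!r && bo->shadow)
		r = validate(p, bo);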
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a5e2fcb..400c66b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
 	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
-			      sizeof(struct fence*), GFP_KERNEL);
+			      sizeof(struct dma_fence*), GFP_KERNEL);
 	if (!ctx->fences)
 		return -ENOMEM;
 
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
-			break;
+			goto failed;
 	}
 
-	if (i < adev->num_rings) {
-		for (j = 0; j < i; j++)
-			amd_sched_entity_fini(&adev->rings[j]->sched,
-					      &ctx->rings[j].entity);
-		kfree(ctx->fences);
-		ctx->fences = NULL;
-		return r;
-	}
 	return 0;
+
+failed:
+	for (j = 0; j < i; j++)
+		amd_sched_entity_fini(&adev->rings[j]->sched,
+				      &ctx->rings[j].entity);
+	kfree(ctx->fences);
+	ctx->fences = NULL;
+	return r;
 }
 
 static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			fence_put(ctx->rings[i].fences[j]);
+			dma_fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 }
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence)
+			      struct dma_fence *fence)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	uint64_t seq = cring->sequence;
 	unsigned idx = 0;
-	struct fence *other = NULL;
+	struct dma_fence *other = NULL;
 
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
 	if (other) {
 		signed long r;
-		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
 		if (r < 0)
 			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
 	}
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	spin_lock(&ctx->ring_lock);
 	cring->fences[idx] = fence;
 	cring->sequence++;
 	spin_unlock(&ctx->ring_lock);
 
-	fence_put(other);
+	dma_fence_put(other);
 
 	return seq;
 }
 
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
-				   struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				       struct amdgpu_ring *ring, uint64_t seq)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-	struct fence *fence;
+	struct dma_fence *fence;
 
 	spin_lock(&ctx->ring_lock);
 
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 		return NULL;
 	}
 
-	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
 	spin_unlock(&ctx->ring_lock);
 
 	return fence;
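For context on the structure being converted here: each amdgpu_ctx ring keeps its last amdgpu_sched_jobs fences in a power-of-two ring buffer, and amdgpu_ctx_add_fence() waits on the slot it is about to overwrite, which bounds a context to amdgpu_sched_jobs submissions in flight per ring. The core of that scheme, as a standalone sketch:

	/* seq & (N - 1) indexes a ring of N fences (N a power of two).
	 * Before reusing a slot, wait for the fence that is N
	 * submissions old, then drop its reference.
	 */
	idx = seq & (amdgpu_sched_jobs - 1);
	old = cring->fences[idx];
	if (old)
		dma_fence_wait_timeout(old, false, MAX_SCHEDULE_TIMEOUT);
	cring->fences[idx] = dma_fence_get(fence);
	dma_fence_put(old);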
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a92..6958d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 	if (adev->vram_scratch.robj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
 static void amdgpu_wb_fini(struct amdgpu_device *adev)
 {
 	if (adev->wb.wb_obj) {
-		if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
-			amdgpu_bo_kunmap(adev->wb.wb_obj);
-			amdgpu_bo_unpin(adev->wb.wb_obj);
-			amdgpu_bo_unreserve(adev->wb.wb_obj);
-		}
-		amdgpu_bo_unref(&adev->wb.wb_obj);
-		adev->wb.wb = NULL;
+		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+				      &adev->wb.gpu_addr,
+				      (void **)&adev->wb.wb);
 		adev->wb.wb_obj = NULL;
 	}
 }
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->wb.wb_obj == NULL) {
-		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0,  NULL, NULL,
-				     &adev->wb.wb_obj);
+		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
+					    (void **)&adev->wb.wb);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 			return r;
 		}
-		r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
-		if (unlikely(r != 0)) {
-			amdgpu_wb_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
-				&adev->wb.gpu_addr);
-		if (r) {
-			amdgpu_bo_unreserve(adev->wb.wb_obj);
-			dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
-			amdgpu_wb_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
-		amdgpu_bo_unreserve(adev->wb.wb_obj);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
-			amdgpu_wb_fini(adev);
-			return r;
-		}
 
 		adev->wb.num_wb = AMDGPU_MAX_WB;
 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
@@ -1051,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 			 amdgpu_vm_block_size);
 		amdgpu_vm_block_size = 9;
 	}
+
+	if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) ||
+	    !amdgpu_check_pot_argument(amdgpu_vram_page_split)) {
+		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+			 amdgpu_vram_page_split);
+		amdgpu_vram_page_split = 1024;
+	}
 }
 
 /**
@@ -1125,11 +1110,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    state);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     state);
 			if (r)
 				return r;
 			break;
@@ -1145,11 +1130,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
-									    state);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+										     state);
 			if (r)
 				return r;
 			break;
@@ -1164,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 	int i, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
 			if (r)
 				return r;
 			break;
@@ -1183,23 +1168,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
 	int i;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type)
-			return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+		if (adev->ip_blocks[i].version->type == block_type)
+			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
 	}
 	return true;
 
 }
 
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
-					struct amdgpu_device *adev,
-					enum amd_ip_block_type type)
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+					     enum amd_ip_block_type type)
 {
 	int i;
 
 	for (i = 0; i < adev->num_ip_blocks; i++)
-		if (adev->ip_blocks[i].type == type)
+		if (adev->ip_blocks[i].version->type == type)
 			return &adev->ip_blocks[i];
 
 	return NULL;
@@ -1220,38 +1204,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 				enum amd_ip_block_type type,
 				u32 major, u32 minor)
 {
-	const struct amdgpu_ip_block_version *ip_block;
-	ip_block = amdgpu_get_ip_block(adev, type);
+	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
 
-	if (ip_block && ((ip_block->major > major) ||
-			((ip_block->major == major) &&
-			(ip_block->minor >= minor))))
+	if (ip_block && ((ip_block->version->major > major) ||
+			((ip_block->version->major == major) &&
+			(ip_block->version->minor >= minor))))
 		return 0;
 
 	return 1;
 }
 
-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add - add an IP block to the device
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+			const struct amdgpu_ip_block_version *ip_block_version)
+{
+	if (!ip_block_version)
+		return -EINVAL;
+
+	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+	return 0;
+}
+
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 {
 	adev->enable_virtual_display = false;
 
 	if (amdgpu_virtual_display) {
 		struct drm_device *ddev = adev->ddev;
 		const char *pci_address_name = pci_name(ddev->pdev);
-		char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
 
 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
 		pciaddstr_tmp = pciaddstr;
-		while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+			pciaddname = strsep(&pciaddname_tmp, ",");
 			if (!strcmp(pci_address_name, pciaddname)) {
+				long num_crtc;
+				int res = -1;
+
 				adev->enable_virtual_display = true;
+
+				if (pciaddname_tmp)
+					res = kstrtol(pciaddname_tmp, 10,
+						      &num_crtc);
+
+				if (!res) {
+					if (num_crtc < 1)
+						num_crtc = 1;
+					if (num_crtc > 6)
+						num_crtc = 6;
+					adev->mode_info.num_crtc = num_crtc;
+				} else {
+					adev->mode_info.num_crtc = 1;
+				}
 				break;
 			}
 		}
 
-		DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
-				 amdgpu_virtual_display, pci_address_name,
-				 adev->enable_virtual_display);
+		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+			 amdgpu_virtual_display, pci_address_name,
+			 adev->enable_virtual_display, adev->mode_info.num_crtc);
 
 		kfree(pciaddstr);
 	}
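With this change the amdgpu.virtual_display module parameter accepts an optional per-device CRTC count: entries are separated by ';', and each entry is a PCI address optionally followed by ',<num_crtc>', clamped to the range 1..6 (defaulting to 1 when no count is given or parsing fails). A hypothetical invocation, with made-up PCI addresses:

	modprobe amdgpu virtual_display="0000:01:00.0,2;0000:02:00.0"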
@@ -1261,7 +1282,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 {
 	int i, r;
 
-	amdgpu_whether_enable_virtual_display(adev);
+	amdgpu_device_enable_virtual_display(adev);
 
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
@@ -1313,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
-	adev->ip_block_status = kcalloc(adev->num_ip_blocks,
-					sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
-	if (adev->ip_block_status == NULL)
-		return -ENOMEM;
-
-	if (adev->ip_blocks == NULL) {
-		DRM_ERROR("No IP blocks found!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d\n", i);
-			adev->ip_block_status[i].valid = false;
+			adev->ip_blocks[i].status.valid = false;
 		} else {
-			if (adev->ip_blocks[i].funcs->early_init) {
-				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+			if (adev->ip_blocks[i].version->funcs->early_init) {
+				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
 				if (r == -ENOENT) {
-					adev->ip_block_status[i].valid = false;
+					adev->ip_blocks[i].status.valid = false;
 				} else if (r) {
-					DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+					DRM_ERROR("early_init of IP block <%s> failed %d\n",
+						  adev->ip_blocks[i].version->funcs->name, r);
 					return r;
 				} else {
-					adev->ip_block_status[i].valid = true;
+					adev->ip_blocks[i].status.valid = true;
 				}
 			} else {
-				adev->ip_block_status[i].valid = true;
+				adev->ip_blocks[i].status.valid = true;
 			}
 		}
 	}
@@ -1355,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
 	int i, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
 		if (r) {
-			DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		adev->ip_block_status[i].sw = true;
+		adev->ip_blocks[i].status.sw = true;
 		/* need to do gmc hw init early so we can allocate gpu mem */
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			r = amdgpu_vram_scratch_init(adev);
 			if (r) {
 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
 				return r;
 			}
-			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 			if (r) {
 				DRM_ERROR("hw_init %d failed %d\n", i, r);
 				return r;
@@ -1380,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
 				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
 				return r;
 			}
-			adev->ip_block_status[i].hw = true;
+			adev->ip_blocks[i].status.hw = true;
 		}
 	}
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].sw)
+		if (!adev->ip_blocks[i].status.sw)
 			continue;
 		/* gmc hw init is done early */
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
 			continue;
-		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 		if (r) {
-			DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		adev->ip_block_status[i].hw = true;
+		adev->ip_blocks[i].status.hw = true;
 	}
 
 	return 0;
@@ -1406,25 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 	int i = 0, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].funcs->late_init) {
-			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+		if (adev->ip_blocks[i].version->funcs->late_init) {
+			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
 			if (r) {
-				DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+				DRM_ERROR("late_init of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
-			adev->ip_block_status[i].late_initialized = true;
+			adev->ip_blocks[i].status.late_initialized = true;
 		}
 		/* skip CG for VCE/UVD, it's handled specially */
-		if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
-		    adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
 			/* enable clockgating to save power */
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_GATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     AMD_CG_STATE_GATE);
 			if (r) {
 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
 		}
@@ -1439,68 +1454,71 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 
 	/* need to disable SMC first */
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].hw)
+		if (!adev->ip_blocks[i].status.hw)
 			continue;
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_UNGATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     AMD_CG_STATE_UNGATE);
 			if (r) {
 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
-			r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
 			/* XXX handle errors */
 			if (r) {
 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 			}
-			adev->ip_block_status[i].hw = false;
+			adev->ip_blocks[i].status.hw = false;
 			break;
 		}
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].hw)
+		if (!adev->ip_blocks[i].status.hw)
 			continue;
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_wb_fini(adev);
 			amdgpu_vram_scratch_fini(adev);
 		}
 		/* ungate blocks before hw fini so that we can shutdown the blocks safely */
-		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-								    AMD_CG_STATE_UNGATE);
+		r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+									     AMD_CG_STATE_UNGATE);
 		if (r) {
-			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
-		adev->ip_block_status[i].hw = false;
+		adev->ip_blocks[i].status.hw = false;
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].sw)
+		if (!adev->ip_blocks[i].status.sw)
 			continue;
-		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
-		adev->ip_block_status[i].sw = false;
-		adev->ip_block_status[i].valid = false;
+		adev->ip_blocks[i].status.sw = false;
+		adev->ip_blocks[i].status.valid = false;
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].late_initialized)
+		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
-		if (adev->ip_blocks[i].funcs->late_fini)
-			adev->ip_blocks[i].funcs->late_fini((void *)adev);
-		adev->ip_block_status[i].late_initialized = false;
+		if (adev->ip_blocks[i].version->funcs->late_fini)
+			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+		adev->ip_blocks[i].status.late_initialized = false;
 	}
 
 	return 0;
@@ -1518,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 		/* ungate blocks so that suspend can properly shut them down */
 		if (i != AMD_IP_BLOCK_TYPE_SMC) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_UNGATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     AMD_CG_STATE_UNGATE);
 			if (r) {
-				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
 			}
 		}
 		/* XXX handle errors */
-		r = adev->ip_blocks[i].funcs->suspend(adev);
+		r = adev->ip_blocks[i].version->funcs->suspend(adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("suspend of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
 	}
 
@@ -1544,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 	int i, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		r = adev->ip_blocks[i].funcs->resume(adev);
+		r = adev->ip_blocks[i].version->funcs->resume(adev);
 		if (r) {
-			DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("resume of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
 	}
@@ -1599,7 +1620,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->vm_manager.vm_pte_funcs = NULL;
 	adev->vm_manager.vm_pte_num_rings = 0;
 	adev->gart.gart_funcs = NULL;
-	adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 
 	adev->smc_rreg = &amdgpu_invalid_rreg;
 	adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -1859,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
-	kfree(adev->ip_block_status);
-	adev->ip_block_status = NULL;
 	adev->accel_working = false;
 	/* free i2c buses */
 	amdgpu_i2c_fini(adev);
@@ -1956,7 +1975,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
 	r = amdgpu_suspend(adev);
 
-	/* evict remaining vram memory */
+	/* evict remaining vram memory
+	 * This second call to evict vram is to evict the gart page table
+	 * using the CPU.
+	 */
 	amdgpu_bo_evict_vram(adev);
 
 	pci_save_state(dev->pdev);
@@ -2096,13 +2118,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 	bool asic_hang = false;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].funcs->check_soft_reset)
-			adev->ip_block_status[i].hang =
-				adev->ip_blocks[i].funcs->check_soft_reset(adev);
-		if (adev->ip_block_status[i].hang) {
-			DRM_INFO("IP block:%d is hang!\n", i);
+		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+			adev->ip_blocks[i].status.hang =
+				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang) {
+			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
 			asic_hang = true;
 		}
 	}
@@ -2114,11 +2136,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->pre_soft_reset) {
-			r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
 			if (r)
 				return r;
 		}
@@ -2132,13 +2154,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
-			if (adev->ip_block_status[i].hang) {
+		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+			if (adev->ip_blocks[i].status.hang) {
 				DRM_INFO("Some block need full reset!\n");
 				return true;
 			}
@@ -2152,11 +2174,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->soft_reset) {
-			r = adev->ip_blocks[i].funcs->soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->soft_reset) {
+			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
 			if (r)
 				return r;
 		}
@@ -2170,11 +2192,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->post_soft_reset)
-			r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->post_soft_reset)
+			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
 		if (r)
 			return r;
 	}
@@ -2193,7 +2215,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
 					   struct amdgpu_ring *ring,
 					   struct amdgpu_bo *bo,
-					   struct fence **fence)
+					   struct dma_fence **fence)
 {
 	uint32_t domain;
 	int r;
@@ -2312,30 +2334,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 		if (need_full_reset && amdgpu_need_backup(adev)) {
 			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 			struct amdgpu_bo *bo, *tmp;
-			struct fence *fence = NULL, *next = NULL;
+			struct dma_fence *fence = NULL, *next = NULL;
 
 			DRM_INFO("recover vram bo from shadow\n");
 			mutex_lock(&adev->shadow_list_lock);
 			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
 				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
 				if (fence) {
-					r = fence_wait(fence, false);
+					r = dma_fence_wait(fence, false);
 					if (r) {
 						WARN(r, "recovery from shadow isn't comleted\n");
 						break;
 					}
 				}
 
-				fence_put(fence);
+				dma_fence_put(fence);
 				fence = next;
 			}
 			mutex_unlock(&adev->shadow_list_lock);
 			if (fence) {
-				r = fence_wait(fence, false);
+				r = dma_fence_wait(fence, false);
 				if (r)
 					WARN(r, "recovery from shadow isn't comleted\n");
 			}
-			fence_put(fence);
+			dma_fence_put(fence);
 		}
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];
@@ -2531,6 +2553,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 		se_bank = (*pos >> 24) & 0x3FF;
 		sh_bank = (*pos >> 34) & 0x3FF;
 		instance_bank = (*pos >> 44) & 0x3FF;
+
+		if (se_bank == 0x3FF)
+			se_bank = 0xFFFFFFFF;
+		if (sh_bank == 0x3FF)
+			sh_bank = 0xFFFFFFFF;
+		if (instance_bank == 0x3FF)
+			instance_bank = 0xFFFFFFFF;
 		use_bank = 1;
 	} else {
 		use_bank = 0;
@@ -2539,8 +2568,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 	*pos &= 0x3FFFF;
 
 	if (use_bank) {
-		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
-		    se_bank >= adev->gfx.config.max_shader_engines)
+		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
 			return -EINVAL;
 		mutex_lock(&adev->grbm_idx_mutex);
 		amdgpu_gfx_select_se_sh(adev, se_bank,
@@ -2587,10 +2616,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 	struct amdgpu_device *adev = f->f_inode->i_private;
 	ssize_t result = 0;
 	int r;
+	bool pm_pg_lock, use_bank;
+	unsigned instance_bank, sh_bank, se_bank;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	/* are we writing registers for which a PG lock is necessary? */
+	pm_pg_lock = (*pos >> 23) & 1;
+
+	if (*pos & (1ULL << 62)) {
+		se_bank = (*pos >> 24) & 0x3FF;
+		sh_bank = (*pos >> 34) & 0x3FF;
+		instance_bank = (*pos >> 44) & 0x3FF;
+
+		if (se_bank == 0x3FF)
+			se_bank = 0xFFFFFFFF;
+		if (sh_bank == 0x3FF)
+			sh_bank = 0xFFFFFFFF;
+		if (instance_bank == 0x3FF)
+			instance_bank = 0xFFFFFFFF;
+		use_bank = 1;
+	} else {
+		use_bank = 0;
+	}
+
+	*pos &= 0x3FFFF;
+
+	if (use_bank) {
+		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+			return -EINVAL;
+		mutex_lock(&adev->grbm_idx_mutex);
+		amdgpu_gfx_select_se_sh(adev, se_bank,
+					sh_bank, instance_bank);
+	}
+
+	if (pm_pg_lock)
+		mutex_lock(&adev->pm.mutex);
+
 	while (size) {
 		uint32_t value;
 
@@ -2609,6 +2673,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 		size -= 4;
 	}
 
+	if (use_bank) {
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+	}
+
+	if (pm_pg_lock)
+		mutex_unlock(&adev->pm.mutex);
+
 	return result;
 }
 
@@ -2871,6 +2943,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	return !r ? 4 : r;
 }
 
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	int r, x;
+	ssize_t result = 0;
+	uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+	if (size & 3 || *pos & 3)
+		return -EINVAL;
+
+	/* decode offset */
+	offset = (*pos & 0x7F);
+	se = ((*pos >> 7) & 0xFF);
+	sh = ((*pos >> 15) & 0xFF);
+	cu = ((*pos >> 23) & 0xFF);
+	wave = ((*pos >> 31) & 0xFF);
+	simd = ((*pos >> 37) & 0xFF);
+
+	/* switch to the specific se/sh/cu */
+	mutex_lock(&adev->grbm_idx_mutex);
+	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+	x = 0;
+	if (adev->gfx.funcs->read_wave_data)
+		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	if (!x)
+		return -EINVAL;
+
+	while (size && (offset < x * 4)) {
+		uint32_t value;
+
+		value = data[offset >> 2];
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		offset += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -2908,6 +3030,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
 	.llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_wave_read,
+	.llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
@@ -2915,6 +3043,7 @@ static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_smc_fops,
 	&amdgpu_debugfs_gca_config_fops,
 	&amdgpu_debugfs_sensors_fops,
+	&amdgpu_debugfs_wave_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2924,6 +3053,7 @@ static const char *debugfs_regs_names[] = {
 	"amdgpu_regs_smc",
 	"amdgpu_gca_config",
 	"amdgpu_sensors",
+	"amdgpu_wave",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
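The amdgpu_regs debugfs read and write paths now share the same offset encoding: bit 62 enables bank selection, bits 24-33, 34-43 and 44-53 carry the SE, SH and instance bank (the value 0x3FF now means broadcast and is mapped to 0xFFFFFFFF), bit 23 requests the PM power-gating lock, and the low 18 bits are the register offset. A sketch, under those assumptions, of how a userspace tool might compose the file offset before pread()/pwrite():

	/* Build an amdgpu_regs offset selecting SE 1, SH 0, broadcast
	 * instance, register dword offset 0x1234. Field positions
	 * mirror the decode in amdgpu_debugfs_regs_read()/_write().
	 */
	uint64_t pos = 0x1234 & 0x3FFFF;	/* register offset */
	pos |= 1ULL << 62;			/* enable bank select */
	pos |= (uint64_t)(1 & 0x3FF) << 24;	/* se_bank = 1 */
	pos |= (uint64_t)(0 & 0x3FF) << 34;	/* sh_bank = 0 */
	pos |= (uint64_t)0x3FF << 44;		/* instance: broadcast */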
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b4..741144f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,29 +35,29 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amdgpu_flip_work *work =
 		container_of(cb, struct amdgpu_flip_work, cb);
 
-	fence_put(f);
+	dma_fence_put(f);
 	schedule_work(&work->flip_work.work);
 }
 
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
-				     struct fence **f)
+				     struct dma_fence **f)
 {
-	struct fence *fence= *f;
+	struct dma_fence *fence = *f;
 
 	if (fence == NULL)
 		return false;
 
 	*f = NULL;
 
-	if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
 		return true;
 
-	fence_put(fence);
+	dma_fence_put(fence);
 	return false;
 }
 
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	struct amdgpu_flip_work *work =
 		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
 	struct amdgpu_device *adev = work->adev;
-	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
 
-	struct drm_crtc *crtc = &amdgpuCrtc->base;
+	struct drm_crtc *crtc = &amdgpu_crtc->base;
 	unsigned long flags;
 	unsigned i;
 	int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* Wait until we're out of the vertical blank period before the one
 	 * targeted by the flip
 	 */
-	if (amdgpuCrtc->enabled &&
+	if (amdgpu_crtc->enabled &&
 	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
 					&vpos, &hpos, NULL, NULL,
 					&crtc->hwmode)
 	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 	    (int)(work->target_vblank -
-		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
 		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
 		return;
 	}
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
 
 	/* Set the flip status */
-	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
 
 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
-					 amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
 
 }
 
@@ -244,9 +244,9 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 
 cleanup:
 	amdgpu_bo_unref(&work->old_abo);
-	fence_put(work->excl);
+	dma_fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
-		fence_put(work->shared[i]);
+		dma_fence_put(work->shared[i]);
 	kfree(work->shared);
 	kfree(work);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 14f57d9..6ca0333 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
 					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
 			}
-			for (i = 0; i < states->numEntries; i++) {
-				if (i >= AMDGPU_MAX_VCE_LEVELS)
-					break;
+			adev->pm.dpm.num_of_vce_states =
+					states->numEntries > AMD_MAX_VCE_LEVELS ?
+					AMD_MAX_VCE_LEVELS : states->numEntries;
+			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 				vce_clk = (VCEClockInfo *)
 					((u8 *)&array->entries[0] +
 					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
 
 	return encoded_lanes[lanes];
 }
+
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+	if (idx < adev->pm.dpm.num_of_vce_states)
+		return &adev->pm.dpm.vce_states[idx];
+
+	return NULL;
+}
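amdgpu_get_vce_clock_state() bounds-checks the index against the number of states parsed from the power table and returns NULL past the end, so callers can simply iterate until it does. A minimal usage sketch (the evclk/ecclk field names are assumed from the VCE state table):

	/* Walk every parsed VCE clock state. */
	unsigned i;
	for (i = 0; ; i++) {
		struct amd_vce_state *s = amdgpu_get_vce_clock_state(adev, i);
		if (!s)
			break;
		DRM_DEBUG("VCE state %u: evclk=%u ecclk=%u\n",
			  i, s->evclk, s->ecclk);
	}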
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 3738a96..bd85e35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -23,6 +23,446 @@
 #ifndef __AMDGPU_DPM_H__
 #define __AMDGPU_DPM_H__
 
+enum amdgpu_int_thermal_type {
+	THERMAL_TYPE_NONE,
+	THERMAL_TYPE_EXTERNAL,
+	THERMAL_TYPE_EXTERNAL_GPIO,
+	THERMAL_TYPE_RV6XX,
+	THERMAL_TYPE_RV770,
+	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+	THERMAL_TYPE_EVERGREEN,
+	THERMAL_TYPE_SUMO,
+	THERMAL_TYPE_NI,
+	THERMAL_TYPE_SI,
+	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+	THERMAL_TYPE_CI,
+	THERMAL_TYPE_KV,
+};
+
+enum amdgpu_dpm_auto_throttle_src {
+	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
+	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum amdgpu_dpm_event_src {
+	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
+	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
+	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
+	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
+struct amdgpu_ps {
+	u32 caps; /* vbios flags */
+	u32 class; /* vbios flags */
+	u32 class2; /* vbios flags */
+	/* UVD clocks */
+	u32 vclk;
+	u32 dclk;
+	/* VCE clocks */
+	u32 evclk;
+	u32 ecclk;
+	bool vce_active;
+	enum amd_vce_level vce_level;
+	/* asic priv */
+	void *ps_priv;
+};
+
+struct amdgpu_dpm_thermal {
+	/* thermal interrupt work */
+	struct work_struct work;
+	/* low temperature threshold */
+	int                min_temp;
+	/* high temperature threshold */
+	int                max_temp;
+	/* was last interrupt low to high or high to low */
+	bool               high_to_low;
+	/* interrupt source */
+	struct amdgpu_irq_src	irq;
+};
+
+enum amdgpu_clk_action
+{
+	AMDGPU_SCLK_UP = 1,
+	AMDGPU_SCLK_DOWN
+};
+
+struct amdgpu_blacklist_clocks
+{
+	u32 sclk;
+	u32 mclk;
+	enum amdgpu_clk_action action;
+};
+
+struct amdgpu_clock_and_voltage_limits {
+	u32 sclk;
+	u32 mclk;
+	u16 vddc;
+	u16 vddci;
+};
+
+struct amdgpu_clock_array {
+	u32 count;
+	u32 *values;
+};
+
+struct amdgpu_clock_voltage_dependency_entry {
+	u32 clk;
+	u16 v;
+};
+
+struct amdgpu_clock_voltage_dependency_table {
+	u32 count;
+	struct amdgpu_clock_voltage_dependency_entry *entries;
+};
+
+union amdgpu_cac_leakage_entry {
+	struct {
+		u16 vddc;
+		u32 leakage;
+	};
+	struct {
+		u16 vddc1;
+		u16 vddc2;
+		u16 vddc3;
+	};
+};
+
+struct amdgpu_cac_leakage_table {
+	u32 count;
+	union amdgpu_cac_leakage_entry *entries;
+};
+
+struct amdgpu_phase_shedding_limits_entry {
+	u16 voltage;
+	u32 sclk;
+	u32 mclk;
+};
+
+struct amdgpu_phase_shedding_limits_table {
+	u32 count;
+	struct amdgpu_phase_shedding_limits_entry *entries;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_entry {
+	u32 vclk;
+	u32 dclk;
+	u16 v;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_table {
+	u8 count;
+	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_entry {
+	u32 ecclk;
+	u32 evclk;
+	u16 v;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_table {
+	u8 count;
+	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_ppm_table {
+	u8 ppm_design;
+	u16 cpu_core_number;
+	u32 platform_tdp;
+	u32 small_ac_platform_tdp;
+	u32 platform_tdc;
+	u32 small_ac_platform_tdc;
+	u32 apu_tdp;
+	u32 dgpu_tdp;
+	u32 dgpu_ulv_power;
+	u32 tj_max;
+};
+
+struct amdgpu_cac_tdp_table {
+	u16 tdp;
+	u16 configurable_tdp;
+	u16 tdc;
+	u16 battery_power_limit;
+	u16 small_power_limit;
+	u16 low_cac_leakage;
+	u16 high_cac_leakage;
+	u16 maximum_power_delivery_limit;
+};
+
+struct amdgpu_dpm_dynamic_state {
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
+	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
+	struct amdgpu_clock_array valid_sclk_values;
+	struct amdgpu_clock_array valid_mclk_values;
+	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
+	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
+	u32 mclk_sclk_ratio;
+	u32 sclk_mclk_delta;
+	u16 vddc_vddci_delta;
+	u16 min_vddc_for_pcie_gen2;
+	struct amdgpu_cac_leakage_table cac_leakage_table;
+	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
+	struct amdgpu_ppm_table *ppm_table;
+	struct amdgpu_cac_tdp_table *cac_tdp_table;
+};
+
+struct amdgpu_dpm_fan {
+	u16 t_min;
+	u16 t_med;
+	u16 t_high;
+	u16 pwm_min;
+	u16 pwm_med;
+	u16 pwm_high;
+	u8 t_hyst;
+	u32 cycle_delay;
+	u16 t_max;
+	u8 control_mode;
+	u16 default_max_fan_pwm;
+	u16 default_fan_output_sensitivity;
+	u16 fan_output_sensitivity;
+	bool ucode_fan_control;
+};
+
+enum amdgpu_pcie_gen {
+	AMDGPU_PCIE_GEN1 = 0,
+	AMDGPU_PCIE_GEN2 = 1,
+	AMDGPU_PCIE_GEN3 = 2,
+	AMDGPU_PCIE_GEN_INVALID = 0xffff
+};
+
+enum amdgpu_dpm_forced_level {
+	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
+	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
+	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
+};
+
+struct amdgpu_dpm_funcs {
+	int (*get_temperature)(struct amdgpu_device *adev);
+	int (*pre_set_power_state)(struct amdgpu_device *adev);
+	int (*set_power_state)(struct amdgpu_device *adev);
+	void (*post_set_power_state)(struct amdgpu_device *adev);
+	void (*display_configuration_changed)(struct amdgpu_device *adev);
+	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
+	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
+	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
+	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
+	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+	bool (*vblank_too_short)(struct amdgpu_device *adev);
+	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
+	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
+	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
+	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
+	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
+	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+	int (*get_sclk_od)(struct amdgpu_device *adev);
+	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+	int (*get_mclk_od)(struct amdgpu_device *adev);
+	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
+	int (*check_state_equal)(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal);
+
+	struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
+};
+
+#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
+#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
+#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
+#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
+#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
+#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
+#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+	((adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+		-EINVAL)
+
+#define amdgpu_dpm_get_temperature(adev) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
+	      (adev)->pm.funcs->get_temperature((adev)))
+
+#define amdgpu_dpm_set_fan_control_mode(adev, m) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
+	      (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+
+#define amdgpu_dpm_get_fan_control_mode(adev) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
+	      (adev)->pm.funcs->get_fan_control_mode((adev)))
+
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+	      (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+	      (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_sclk(adev, l) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
+		(adev)->pm.funcs->get_sclk((adev), (l)))
+
+#define amdgpu_dpm_get_mclk(adev, l)  \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
+	      (adev)->pm.funcs->get_mclk((adev), (l)))
+
+
+#define amdgpu_dpm_force_performance_level(adev, l) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
+	      (adev)->pm.funcs->force_performance_level((adev), (l)))
+
+#define amdgpu_dpm_powergate_uvd(adev, g) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
+	      (adev)->pm.funcs->powergate_uvd((adev), (g)))
+
+#define amdgpu_dpm_powergate_vce(adev, g) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
+	      (adev)->pm.funcs->powergate_vce((adev), (g)))
+
+#define amdgpu_dpm_get_current_power_state(adev) \
+	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_performance_level(adev) \
+	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+		(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
+#define amdgpu_dpm_get_sclk_od(adev) \
+	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
+#define amdgpu_dpm_dispatch_task(adev, event_id, input, output)		\
+	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps), (rps), (equal))
+
+#define amdgpu_dpm_get_vce_clock_state(adev, i)				\
+	((adev)->pp_enabled ?						\
+	 (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
+	 (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+
+struct amdgpu_dpm {
+	struct amdgpu_ps        *ps;
+	/* number of valid power states */
+	int                     num_ps;
+	/* current power state that is active */
+	struct amdgpu_ps        *current_ps;
+	/* requested power state */
+	struct amdgpu_ps        *requested_ps;
+	/* boot up power state */
+	struct amdgpu_ps        *boot_ps;
+	/* default uvd power state */
+	struct amdgpu_ps        *uvd_ps;
+	/* vce requirements */
+	u32                  num_of_vce_states;
+	struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
+	enum amd_vce_level vce_level;
+	enum amd_pm_state_type state;
+	enum amd_pm_state_type user_state;
+	enum amd_pm_state_type last_state;
+	enum amd_pm_state_type last_user_state;
+	u32                     platform_caps;
+	u32                     voltage_response_time;
+	u32                     backbias_response_time;
+	void                    *priv;
+	u32			new_active_crtcs;
+	int			new_active_crtc_count;
+	u32			current_active_crtcs;
+	int			current_active_crtc_count;
+	struct amdgpu_dpm_dynamic_state dyn_state;
+	struct amdgpu_dpm_fan fan;
+	u32 tdp_limit;
+	u32 near_tdp_limit;
+	u32 near_tdp_limit_adjusted;
+	u32 sq_ramping_threshold;
+	u32 cac_leakage;
+	u16 tdp_od_limit;
+	u32 tdp_adjustment;
+	u16 load_line_slope;
+	bool power_control;
+	bool ac_power;
+	/* special states active */
+	bool                    thermal_active;
+	bool                    uvd_active;
+	bool                    vce_active;
+	/* thermal handling */
+	struct amdgpu_dpm_thermal thermal;
+	/* forced levels */
+	enum amdgpu_dpm_forced_level forced_level;
+};
+
+struct amdgpu_pm {
+	struct mutex		mutex;
+	u32                     current_sclk;
+	u32                     current_mclk;
+	u32                     default_sclk;
+	u32                     default_mclk;
+	struct amdgpu_i2c_chan *i2c_bus;
+	/* internal thermal controller on rv6xx+ */
+	enum amdgpu_int_thermal_type int_thermal_type;
+	struct device	        *int_hwmon_dev;
+	/* fan control parameters */
+	bool                    no_fan;
+	u8                      fan_pulses_per_revolution;
+	u8                      fan_min_rpm;
+	u8                      fan_max_rpm;
+	/* dpm */
+	bool                    dpm_enabled;
+	bool                    sysfs_initialized;
+	struct amdgpu_dpm       dpm;
+	const struct firmware	*fw;	/* SMC firmware */
+	uint32_t                fw_version;
+	const struct amdgpu_dpm_funcs *funcs;
+	uint32_t                pcie_gen_mask;
+	uint32_t                pcie_mlw_mask;
+	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+};
+
 #define R600_SSTU_DFLT                               0
 #define R600_SST_DFLT                                0x00C8
 
@@ -82,4 +522,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
 				 u16 default_lanes);
 u8 amdgpu_encode_pci_lane_width(u32 lanes);
 
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+
 #endif
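
The long run of amdgpu_dpm_* wrappers above encodes one dispatch rule: when the powerplay component is enabled, route the call through pp_funcs with the opaque pp_handle; otherwise fall back to the legacy pm.funcs table. A self-contained sketch of that dual-backend dispatch (all names hypothetical):

	#include <stdio.h>
	#include <stdbool.h>

	struct dev;

	struct pp_funcs  { int (*get_temperature)(void *pp_handle); };
	struct dpm_funcs { int (*get_temperature)(struct dev *d); };

	struct dev {
		bool pp_enabled;
		void *pp_handle;
		const struct pp_funcs  *pp;
		const struct dpm_funcs *dpm;
	};

	/* mirrors amdgpu_dpm_get_temperature(): pick the backend at the call site */
	#define dev_get_temperature(d) \
		((d)->pp_enabled ? (d)->pp->get_temperature((d)->pp_handle) \
				 : (d)->dpm->get_temperature((d)))

	static int pp_temp(void *h)           { (void)h; return 65; }
	static int legacy_temp(struct dev *d) { (void)d; return 70; }

	int main(void)
	{
		struct dev d = {
			.pp_enabled = false,
			.pp_handle = NULL,
			.pp  = &(const struct pp_funcs){ pp_temp },
			.dpm = &(const struct dpm_funcs){ legacy_temp },
		};

		printf("temp: %d\n", dev_get_temperature(&d)); /* legacy path: 70 */
		return 0;
	}
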
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71ed27e..6bb4d9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -58,9 +58,10 @@
  * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
  * - 3.7.0 - Add support for VCE clock list packet
  * - 3.8.0 - Add support raster config init in the kernel
+ * - 3.9.0 - Add support for VRAM and GTT memory info queries.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	8
+#define KMS_DRIVER_MINOR	9
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64;
 int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
 MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
 module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
 
+MODULE_PARM_DESC(vram_page_split, "Number of pages after which VRAM allocations are split (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
 
-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+		 "Enable virtual display feature (virtual_display takes a list like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
 module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
 
 static const struct pci_device_id pciidlist[] = {
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	/* fiji */
 	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+	{0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
 	/* carrizo */
 	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
 	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
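
The new vram_page_split parameter caps how many pages a single VRAM allocation may span before the manager splits it (1024 pages by default, -1 to disable). How the driver validates the value is not part of this hunk; a purely hypothetical sketch of such a sanitizer:

	#include <stdio.h>

	/* hypothetical sanitizer for a "split after N pages" knob:
	 * -1 disables splitting, anything else gets an assumed floor */
	static int sanitize_vram_page_split(int requested)
	{
		if (requested == -1)
			return -1;      /* splitting disabled */
		if (requested < 16)
			return 16;      /* hypothetical minimum */
		return requested;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       sanitize_vram_page_split(-1),
		       sanitize_vram_page_split(4),
		       sanitize_vram_page_split(1024)); /* -1 16 1024 */
		return 0;
	}
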
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9fb8aa4..38bdc2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -90,12 +90,12 @@ static struct fb_ops amdgpufb_ops = {
 };
 
 
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
+int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
 {
 	int aligned = width;
 	int pitch_mask = 0;
 
-	switch (bpp / 8) {
+	switch (cpp) {
 	case 1:
 		pitch_mask = 255;
 		break;
@@ -110,7 +110,7 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
 
 	aligned += pitch_mask;
 	aligned &= ~pitch_mask;
-	return aligned;
+	return aligned * cpp;
 }
 
 static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,20 +139,21 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	int ret;
 	int aligned_size, size;
 	int height = mode_cmd->height;
-	u32 bpp, depth;
+	u32 cpp;
 
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+	cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
 
 	/* need to align pitch with crtc limits */
-	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
-						  fb_tiled) * ((bpp + 1) / 8);
+	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
+						  fb_tiled);
 
 	height = ALIGN(mode_cmd->height, 8);
 	size = mode_cmd->pitches[0] * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
 				       AMDGPU_GEM_DOMAIN_VRAM,
-				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				       true, &gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
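
amdgpu_align_pitch() now takes cpp (bytes per pixel) instead of bpp and returns the pitch in bytes, which removes the error-prone `* ((bpp + 1) / 8)` from every call site. A stand-alone model of the arithmetic; only the cpp == 1 mask is visible in the hunk above, the other masks are assumptions:

	#include <stdio.h>

	/* align the width in pixels to a per-cpp mask, then convert to bytes */
	static int align_pitch(int width, int cpp)
	{
		int pitch_mask;
		int aligned = width;

		switch (cpp) {
		case 1:  pitch_mask = 255; break;
		case 2:  pitch_mask = 127; break;  /* assumed; not in the hunk */
		case 3:
		case 4:  pitch_mask = 63;  break;  /* assumed; not in the hunk */
		default: pitch_mask = 0;   break;
		}

		aligned += pitch_mask;
		aligned &= ~pitch_mask;
		return aligned * cpp;              /* pitch in bytes */
	}

	int main(void)
	{
		/* 1920 px at 4 bytes/px is already 64-aligned: 1920 * 4 = 7680 */
		printf("%d\n", align_pitch(1920, 4));
		return 0;
	}
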
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f..57552c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
  */
 
 struct amdgpu_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	/* RB, DMA, etc. */
 	struct amdgpu_ring		*ring;
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
 /*
  * Cast helper
  */
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct fence *old, **ptr;
+	struct dma_fence *old, **ptr;
 	uint32_t seq;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 
 	seq = ++ring->fence_drv.sync_seq;
 	fence->ring = ring;
-	fence_init(&fence->base, &amdgpu_fence_ops,
-		   &ring->fence_drv.lock,
-		   adev->fence_context + ring->idx,
-		   seq);
+	dma_fence_init(&fence->base, &amdgpu_fence_ops,
+		       &ring->fence_drv.lock,
+		       adev->fence_context + ring->idx,
+		       seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, AMDGPU_FENCE_FLAG_INT);
 
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
 	old = rcu_dereference_protected(*ptr, 1);
-	if (old && !fence_is_signaled(old)) {
+	if (old && !dma_fence_is_signaled(old)) {
 		DRM_INFO("rcu slot is busy\n");
-		fence_wait(old, false);
+		dma_fence_wait(old, false);
 	}
 
-	rcu_assign_pointer(*ptr, fence_get(&fence->base));
+	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 
 	*f = &fence->base;
 
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 	seq &= drv->num_fences_mask;
 
 	do {
-		struct fence *fence, **ptr;
+		struct dma_fence *fence, **ptr;
 
 		++last_seq;
 		last_seq &= drv->num_fences_mask;
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		if (!fence)
 			continue;
 
-		r = fence_signal(fence);
+		r = dma_fence_signal(fence);
 		if (!r)
-			FENCE_TRACE(fence, "signaled from irq context\n");
+			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
 		else
 			BUG();
 
-		fence_put(fence);
+		dma_fence_put(fence);
 	} while (last_seq != seq);
 }
 
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
 	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
-	struct fence *fence, **ptr;
+	struct dma_fence *fence, **ptr;
 	int r;
 
 	if (!seq)
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	rcu_read_lock();
 	fence = rcu_dereference(*ptr);
-	if (!fence || !fence_get_rcu(fence)) {
+	if (!fence || !dma_fence_get_rcu(fence)) {
 		rcu_read_unlock();
 		return 0;
 	}
 	rcu_read_unlock();
 
-	r = fence_wait(fence, false);
-	fence_put(fence);
+	r = dma_fence_wait(fence, false);
+	dma_fence_put(fence);
 	return r;
 }
 
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amd_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
-			fence_put(ring->fence_drv.fences[j]);
+			dma_fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
 		ring->fence_drv.fences = NULL;
 		ring->fence_drv.initialized = false;
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
  * Common fence implementation
  */
 
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amdgpu";
 }
 
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	return (const char *)fence->ring->name;
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
  * to fence_queue that checks if this fence is signaled, and if so it
  * signals the fence and removes itself.
  */
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	if (!timer_pending(&ring->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(ring);
 
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 
 	return true;
 }
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
  */
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	kmem_cache_free(amdgpu_fence_slab, fence);
 }
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
 {
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amdgpu_fence_release,
 };
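
The fence driver keeps one fence pointer per ring slot, indexed by seq & num_fences_mask, and drains any still-pending occupant before a slot is recycled (the "rcu slot is busy" path above). A single-threaded model of that scheme, with locking and RCU elided:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	#define NUM_FENCES  4                 /* must be a power of two */
	#define FENCES_MASK (NUM_FENCES - 1)

	struct fence { unsigned seq; bool signaled; };

	static struct fence *slots[NUM_FENCES];

	static void emit(unsigned seq)
	{
		struct fence **ptr = &slots[seq & FENCES_MASK];
		struct fence *f = malloc(sizeof(*f));

		f->seq = seq;
		f->signaled = false;

		/* a pending occupant must be drained before the slot is reused */
		if (*ptr && !(*ptr)->signaled) {
			printf("slot busy, waiting on seq %u\n", (*ptr)->seq);
			(*ptr)->signaled = true; /* stands in for dma_fence_wait() */
		}
		free(*ptr);
		*ptr = f;
	}

	int main(void)
	{
		unsigned seq;

		for (seq = 1; seq <= 6; seq++)
			emit(seq);            /* seq 5 and 6 recycle slots 1 and 2 */
		for (seq = 0; seq < NUM_FENCES; seq++)
			free(slots[seq]);
		return 0;
	}
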
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 21a1242..964d2a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 	if (adev->gart.robj == NULL) {
 		r = amdgpu_bo_create(adev, adev->gart.table_size,
 				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, NULL, &adev->gart.robj);
 		if (r) {
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a7ea9a3..cd62f6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  * Call from drm_gem_handle_create which appear in both new and open ioctl
  * case.
  */
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+			   struct drm_file *file_priv)
 {
 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = abo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 			     struct drm_file *file_priv)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 
@@ -407,10 +408,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	if (timeout == 0)
-		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
-	else
-		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
+	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+						  timeout);
 
 	/* ret == 0 means not signaled,
 	 * ret > 0 means signaled
@@ -470,6 +469,16 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 	return r;
 }
 
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+	/* if anything is swapped out don't swap it in here,
+	 * just abort and wait for the next CS */
+
+	return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
+
 /**
  * amdgpu_gem_va_update_vm -update the bo_va in its VM
  *
@@ -480,7 +489,8 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-				    struct amdgpu_bo_va *bo_va, uint32_t operation)
+				    struct amdgpu_bo_va *bo_va,
+				    uint32_t operation)
 {
 	struct ttm_validate_buffer tv, *entry;
 	struct amdgpu_bo_list_entry vm_pd;
@@ -503,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (r)
 		goto error_print;
 
-	amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
 	list_for_each_entry(entry, &list, head) {
 		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
 		/* if anything is swapped out don't swap it in here,
@@ -511,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
-	list_for_each_entry(entry, &duplicates, head) {
-		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
-		/* if anything is swapped out don't swap it in here,
-		   just abort and wait for the next CS */
-		if (domain == AMDGPU_GEM_DOMAIN_CPU)
-			goto error_unreserve;
-	}
+	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+				      NULL);
+	if (r)
+		goto error_unreserve;
 
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
@@ -538,8 +544,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 
-
-
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
@@ -549,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
-	struct ttm_validate_buffer tv, tv_pd;
+	struct amdgpu_bo_list_entry vm_pd;
+	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head list, duplicates;
 	uint32_t invalid_flags, va_flags = 0;
@@ -594,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
-	tv_pd.bo = &fpriv->vm.page_directory->tbo;
-	tv_pd.shared = true;
-	list_add(&tv_pd.head, &list);
+	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
@@ -704,7 +707,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 	uint32_t handle;
 	int r;
 
-	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+	args->pitch = amdgpu_align_pitch(adev, args->width,
+					 DIV_ROUND_UP(args->bpp, 8), 0);
 	args->size = (u64)args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
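
With the timeout == 0 special case gone, amdgpu_gem_wait_idle_ioctl() relies on the wait helper's single return convention: negative for an error, 0 for "not signaled within the timeout", positive for "signaled". A small sketch of mapping that convention onto a status flag plus error code:

	#include <stdio.h>
	#include <stdbool.h>

	static int wait_result_to_status(long ret, bool *signaled)
	{
		if (ret < 0)
			return (int)ret;   /* propagate, e.g. -ERESTARTSYS */
		*signaled = ret > 0;       /* 0: timed out; >0: signaled */
		return 0;
	}

	int main(void)
	{
		bool s = false;

		wait_result_to_status(0, &s);
		printf("timeout:  signaled=%d\n", s);
		wait_result_to_status(42, &s);
		printf("finished: signaled=%d\n", s);
		return 0;
	}
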
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a074edd..01a42b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,6 +24,7 @@
  */
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_gfx.h"
 
 /*
  * GPU scratch registers helpers function.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 51321e1..e0204408 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,6 +27,7 @@
 int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 
-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+		unsigned max_sh);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f86c844..3c634f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -168,6 +168,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		return -ENOMEM;
 
 	node->start = AMDGPU_BO_INVALID_OFFSET;
+	node->size = mem->num_pages;
 	mem->mm_node = node;
 
 	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6a6c86c..216a957 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * Free an IB (all asics).
  */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
-		    struct fence *f)
+		    struct dma_fence *f)
 {
 	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 }
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
  * to SI there was just a DE IB.
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
-		       struct amdgpu_job *job, struct fence **f)
+		       struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+		       struct amdgpu_job *job, struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 	}
 
-	alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
-		num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+	alloc_size = ring->funcs->emit_frame_size + num_ibs *
+		ring->funcs->emit_ib_size;
 
 	r = amdgpu_ring_alloc(ring, alloc_size);
 	if (r) {
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return r;
 	}
 
-	if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+	if (ring->funcs->init_cond_exec)
 		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
 	if (vm) {
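
Keeping emit_frame_size and emit_ib_size in the ring funcs lets amdgpu_ib_schedule() bound the ring space for a submission up front: one frame of fixed overhead plus one IB packet per IB. The arithmetic, with illustrative dword counts:

	#include <stdio.h>

	/* worst-case ring space for one submission; counts are examples only */
	static unsigned alloc_size(unsigned emit_frame_size,
				   unsigned emit_ib_size, unsigned num_ibs)
	{
		return emit_frame_size + num_ibs * emit_ib_size;
	}

	int main(void)
	{
		/* e.g. 64 dwords of frame overhead, 4 per IB, 3 IBs -> 76 */
		printf("%u dwords\n", alloc_size(64, 4, 3));
		return 0;
	}
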
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8c58079..a0de628 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-	struct fence *f;
+	struct dma_fence *f;
 	unsigned i;
 
 	/* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
 	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-	fence_put(job->fence);
+	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
 {
 	amdgpu_job_free_resources(job);
 
-	fence_put(job->fence);
+	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
-		      struct fence **f)
+		      struct dma_fence **f)
 {
 	int r;
 	job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 
 	job->owner = owner;
 	job->fence_ctx = entity->fence_context;
-	*f = fence_get(&job->base.s_fence->finished);
+	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	amd_sched_entity_push_job(&job->base);
 
 	return 0;
 }
 
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 
-	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+	struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
 
 	if (fence == NULL && vm && !job->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 	return fence;
 }
 
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	struct amdgpu_job *job;
 	int r;
 
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 
 	/* if gpu reset, hw fence will be replaced here */
-	fence_put(job->fence);
-	job->fence = fence_get(fence);
+	dma_fence_put(job->fence);
+	job->fence = dma_fence_get(fence);
 	amdgpu_job_free_resources(job);
 	return fence;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb1..7839267 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 
 		for (i = 0; i < adev->num_ip_blocks; i++) {
-			if (adev->ip_blocks[i].type == type &&
-			    adev->ip_block_status[i].valid) {
-				ip.hw_ip_version_major = adev->ip_blocks[i].major;
-				ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+			if (adev->ip_blocks[i].version->type == type &&
+			    adev->ip_blocks[i].status.valid) {
+				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
 				ip.capabilities_flags = 0;
 				ip.available_rings = ring_mask;
 				ip.ib_start_alignment = ib_start_alignment;
@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 
 		for (i = 0; i < adev->num_ip_blocks; i++)
-			if (adev->ip_blocks[i].type == type &&
-			    adev->ip_block_status[i].valid &&
+			if (adev->ip_blocks[i].version->type == type &&
+			    adev->ip_blocks[i].status.valid &&
 			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
 				count++;
 
@@ -411,6 +411,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &vram_gtt,
 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
 	}
+	case AMDGPU_INFO_MEMORY: {
+		struct drm_amdgpu_memory_info mem;
+
+		memset(&mem, 0, sizeof(mem));
+		mem.vram.total_heap_size = adev->mc.real_vram_size;
+		mem.vram.usable_heap_size =
+			adev->mc.real_vram_size - adev->vram_pin_size;
+		mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+		mem.cpu_accessible_vram.total_heap_size =
+			adev->mc.visible_vram_size;
+		mem.cpu_accessible_vram.usable_heap_size =
+			adev->mc.visible_vram_size -
+			(adev->vram_pin_size - adev->invisible_pin_size);
+		mem.cpu_accessible_vram.heap_usage =
+			atomic64_read(&adev->vram_vis_usage);
+		mem.cpu_accessible_vram.max_allocation =
+			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+		mem.gtt.total_heap_size = adev->mc.gtt_size;
+		mem.gtt.usable_heap_size =
+			adev->mc.gtt_size - adev->gart_pin_size;
+		mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+		return copy_to_user(out, &mem,
+				    min((size_t)size, sizeof(mem)))
+				    ? -EFAULT : 0;
+	}
 	case AMDGPU_INFO_READ_MMR_REG: {
 		unsigned n, alloc_size;
 		uint32_t *regs;
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.ids_flags = 0;
 		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+		if (amdgpu_sriov_vf(adev))
+			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -494,6 +526,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &dev_info,
 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
 	}
+	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+		unsigned i;
+		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+		struct amd_vce_state *vce_state;
+
+		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+			if (vce_state) {
+				vce_clk_table.entries[i].sclk = vce_state->sclk;
+				vce_clk_table.entries[i].mclk = vce_state->mclk;
+				vce_clk_table.entries[i].eclk = vce_state->evclk;
+				vce_clk_table.num_valid_entries++;
+			}
+		}
+
+		return copy_to_user(out, &vce_clk_table,
+				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+	}
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
 		return -EINVAL;
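
The AMDGPU_INFO_MEMORY query reports, per heap, the total size, the usable size (total minus pinned), the current usage, and a soft allocation cap of three quarters of the usable size. The accounting, modeled stand-alone with example numbers:

	#include <stdio.h>
	#include <stdint.h>

	struct heap_info {
		uint64_t total_heap_size;
		uint64_t usable_heap_size;
		uint64_t heap_usage;
		uint64_t max_allocation;
	};

	/* fill one heap the way the AMDGPU_INFO_MEMORY path above does */
	static void fill_heap(struct heap_info *h, uint64_t total,
			      uint64_t pinned, uint64_t usage)
	{
		h->total_heap_size = total;
		h->usable_heap_size = total - pinned;
		h->heap_usage = usage;
		h->max_allocation = h->usable_heap_size * 3 / 4;
	}

	int main(void)
	{
		struct heap_info vram;

		/* 8 GiB VRAM, 256 MiB pinned, 1 GiB in use (example numbers) */
		fill_heap(&vram, 8ULL << 30, 256ULL << 20, 1ULL << 30);
		printf("usable=%llu max_alloc=%llu\n",
		       (unsigned long long)vram.usable_heap_size,
		       (unsigned long long)vram.max_allocation);
		return 0;
	}
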
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 32fa7b7..7ea3cac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -285,7 +285,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 {
 	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_mn *rmn;
 	struct amdgpu_mn_node *node = NULL;
 	struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
  */
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_mn *rmn;
 	struct list_head *head;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 7b0eff7..1e23334 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -341,8 +341,6 @@ struct amdgpu_mode_info {
 	int			num_dig; /* number of dig blocks */
 	int			disp_priority;
 	const struct amdgpu_display_funcs *funcs;
-	struct hrtimer vblank_timer;
-	enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 
 #define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -413,6 +411,9 @@ struct amdgpu_crtc {
 	u32 wm_high;
 	u32 lb_vblank_lead_lines;
 	struct drm_display_mode hw_mode;
+	/* for virtual dce */
+	struct hrtimer vblank_timer;
+	enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 
 struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aa074fa..f0a0513 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
 
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	struct amdgpu_bo *bo;
 
 	bo = container_of(tbo, struct amdgpu_bo, tbo);
 
-	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
 
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
-		mutex_lock(&bo->adev->shadow_list_lock);
+		mutex_lock(&adev->shadow_list_lock);
 		list_del_init(&bo->shadow_list);
-		mutex_unlock(&bo->adev->shadow_list_lock);
+		mutex_unlock(&adev->shadow_list_lock);
 	}
 	kfree(bo->metadata);
 	kfree(bo);
@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+		unsigned lpfn = 0;
+
+		/* This forces a reallocation if the flag wasn't set before */
+		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+			lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
 
 		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
 		    !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
 		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 			places[c].fpfn = visible_pfn;
-			places[c].lpfn = 0;
+			places[c].lpfn = lpfn;
 			places[c].flags = TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
 				TTM_PL_FLAG_TOPDOWN;
@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 		}
 
 		places[c].fpfn = 0;
-		places[c].lpfn = 0;
+		places[c].lpfn = lpfn;
 		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 			TTM_PL_FLAG_VRAM;
 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
-				  abo->placements, domain, abo->flags);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+				  domain, abo->flags);
 }
 
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 	int r;
 
 	r = amdgpu_bo_create(adev, size, align, true, domain,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, bo_ptr);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		kfree(bo);
 		return r;
 	}
-	bo->adev = adev;
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
 	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -383,7 +391,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 
 	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		if (adev->mman.buffer_funcs_ring == NULL ||
 		   !adev->mman.buffer_funcs_ring->ready) {
@@ -403,9 +411,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
 		amdgpu_bo_fence(bo, fence, false);
 		amdgpu_bo_unreserve(bo);
-		fence_put(bo->tbo.moving);
-		bo->tbo.moving = fence_get(fence);
-		fence_put(fence);
+		dma_fence_put(bo->tbo.moving);
+		bo->tbo.moving = dma_fence_get(fence);
+		dma_fence_put(fence);
 	}
 	*bo_ptr = bo;
 
@@ -491,7 +499,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
 			       struct reservation_object *resv,
-			       struct fence **fence,
+			       struct dma_fence **fence,
 			       bool direct)
 
 {
@@ -523,7 +531,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 				  struct amdgpu_ring *ring,
 				  struct amdgpu_bo *bo,
 				  struct reservation_object *resv,
-				  struct fence **fence,
+				  struct dma_fence **fence,
 				  bool direct)
 
 {
@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 			     u64 min_offset, u64 max_offset,
 			     u64 *gpu_addr)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r, i;
 	unsigned fpfn, lpfn;
 
@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 		return 0;
 	}
+
+	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	amdgpu_ttm_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		/* force to pin into visible video ram */
 		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
 		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
 		    (!max_offset || max_offset >
-		     bo->adev->mc.visible_vram_size)) {
+		     adev->mc.visible_vram_size)) {
 			if (WARN_ON_ONCE(min_offset >
-					 bo->adev->mc.visible_vram_size))
+					 adev->mc.visible_vram_size))
 				return -EINVAL;
 			fpfn = min_offset >> PAGE_SHIFT;
-			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
 		} else {
 			fpfn = min_offset >> PAGE_SHIFT;
 			lpfn = max_offset >> PAGE_SHIFT;
@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p pin failed\n", bo);
+		dev_err(adev->dev, "%p pin failed\n", bo);
 		goto error;
 	}
 	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p bind failed\n", bo);
+		dev_err(adev->dev, "%p bind failed\n", bo);
 		goto error;
 	}
 
@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	if (gpu_addr != NULL)
 		*gpu_addr = amdgpu_bo_gpu_offset(bo);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-		bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+		adev->vram_pin_size += amdgpu_bo_size(bo);
 		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+			adev->invisible_pin_size += amdgpu_bo_size(bo);
 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
-		bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+		adev->gart_pin_size += amdgpu_bo_size(bo);
 	}
 
 error:
@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r, i;
 
 	if (!bo->pin_count) {
-		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
 		return 0;
 	}
 	bo->pin_count--;
@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
 		goto error;
 	}
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-		bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+		adev->vram_pin_size -= amdgpu_bo_size(bo);
 		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+			adev->invisible_pin_size -= amdgpu_bo_size(bo);
 	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+		adev->gart_pin_size -= amdgpu_bo_size(bo);
 	}
 
 error:
@@ -849,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
@@ -856,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 		return;
 
 	abo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(abo->adev, abo);
+	amdgpu_vm_bo_invalidate(adev, abo);
 
 	/* update statistics */
 	if (!new_mem)
 		return;
 
 	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
 
 	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct amdgpu_device *adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	unsigned long offset, size, lpfn;
 	int i, r;
@@ -879,13 +892,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return 0;
 
 	abo = container_of(bo, struct amdgpu_bo, tbo);
-	adev = abo->adev;
 	if (bo->mem.mem_type != TTM_PL_VRAM)
 		return 0;
 
 	size = bo->mem.num_pages << PAGE_SHIFT;
 	offset = bo->mem.start << PAGE_SHIFT;
-	if ((offset + size) <= adev->mc.visible_vram_size)
+	/* TODO: figure out how to map scattered VRAM to the CPU */
+	if ((offset + size) <= adev->mc.visible_vram_size &&
+	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
 		return 0;
 
 	/* Can't move a pinned BO to visible VRAM */
@@ -893,6 +907,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return -EINVAL;
 
 	/* hurrah the memory is not visible ! */
+	abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
 	lpfn =	adev->mc.visible_vram_size >> PAGE_SHIFT;
 	for (i = 0; i < abo->placement.num_placement; i++) {
@@ -926,7 +941,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
  * @shared: true if fence should be added shared
  *
  */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
 	struct reservation_object *resv = bo->tbo.resv;
@@ -954,6 +969,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
 		     !bo->pin_count);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
 	return bo->tbo.offset;
 }
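
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS works by capping the placement's lpfn at the last VRAM page whenever the flag is set; as the in-line comment notes, a placement that differs from the one the BO was created with forces TTM to re-validate, so a BO first allocated scattered gets re-allocated contiguously on pin. A small model of that decision (4 KiB pages assumed):

	#include <stdio.h>
	#include <stdint.h>

	#define FLAG_VRAM_CONTIGUOUS 0x1

	struct placement { unsigned fpfn, lpfn; };

	/* a contiguous request caps lpfn at the last VRAM page, which also
	 * makes the placement compare as "changed" and forces re-validation */
	static struct placement vram_placement(uint64_t vram_size, unsigned flags)
	{
		struct placement p = { 0, 0 };

		if (flags & FLAG_VRAM_CONTIGUOUS)
			p.lpfn = (unsigned)(vram_size >> 12); /* 4 KiB pages */
		return p;
	}

	int main(void)
	{
		struct placement a = vram_placement(8ULL << 30, 0);
		struct placement b = vram_placement(8ULL << 30, FLAG_VRAM_CONTIGUOUS);

		printf("scattered lpfn=%u contiguous lpfn=%u\n", a.lpfn, b.lpfn);
		return 0;
	}
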
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034..5cbf59e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
  */
 static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r;
 
 	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
-			dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+			dev_err(adev->dev, "%p reserve failed\n", bo);
 		return r;
 	}
 	return 0;
@@ -156,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *new_mem);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
 			       struct reservation_object *resv,
-			       struct fence **fence, bool direct);
+			       struct dma_fence **fence, bool direct);
 int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 				  struct amdgpu_ring *ring,
 				  struct amdgpu_bo *bo,
 				  struct reservation_object *resv,
-				  struct fence **fence,
+				  struct dma_fence **fence,
 				  bool direct);
 
 
@@ -200,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 			      struct amdgpu_sa_bo **sa_bo,
-			      struct fence *fence);
+			      struct dma_fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 					 struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index accc908..274f330 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -986,10 +986,10 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
 
 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 {
-	int i;
 	struct amdgpu_ps *ps;
 	enum amd_pm_state_type dpm_state;
 	int ret;
+	bool equal;
 
 	/* if dpm init failed */
 	if (!adev->pm.dpm_enabled)
@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	else
 		return;
 
-	/* no need to reprogram if nothing changed unless we are on BTC+ */
-	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
-		/* vce just modifies an existing state so force a change */
-		if (ps->vce_active != adev->pm.dpm.vce_active)
-			goto force;
-		if (adev->flags & AMD_IS_APU) {
-			/* for APUs if the num crtcs changed but state is the same,
-			 * all we need to do is update the display configuration.
-			 */
-			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
-				/* update display watermarks based on new power state */
-				amdgpu_display_bandwidth_update(adev);
-				/* update displays */
-				amdgpu_dpm_display_configuration_changed(adev);
-				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-			}
-			return;
-		} else {
-			/* for BTC+ if the num crtcs hasn't changed and state is the same,
-			 * nothing to do, if the num crtcs is > 1 and state is the same,
-			 * update display configuration.
-			 */
-			if (adev->pm.dpm.new_active_crtcs ==
-			    adev->pm.dpm.current_active_crtcs) {
-				return;
-			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
-				   (adev->pm.dpm.new_active_crtc_count > 1)) {
-				/* update display watermarks based on new power state */
-				amdgpu_display_bandwidth_update(adev);
-				/* update displays */
-				amdgpu_dpm_display_configuration_changed(adev);
-				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-				return;
-			}
-		}
-	}
-
-force:
 	if (amdgpu_dpm == 1) {
 		printk("switching from power state:\n");
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
@@ -1059,31 +1019,21 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	/* update whether vce is active */
 	ps->vce_active = adev->pm.dpm.vce_active;
 
+	amdgpu_dpm_display_configuration_changed(adev);
+
 	ret = amdgpu_dpm_pre_set_power_state(adev);
 	if (ret)
 		return;
 
-	/* update display watermarks based on new power state */
-	amdgpu_display_bandwidth_update(adev);
+	if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+		equal = false;
 
-	/* wait for the rings to drain */
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (ring && ring->ready)
-			amdgpu_fence_wait_empty(ring);
-	}
+	if (equal)
+		return;
 
-	/* program the new power state */
 	amdgpu_dpm_set_power_state(adev);
-
-	/* update current power state */
-	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
-
 	amdgpu_dpm_post_set_power_state(adev);
 
-	/* update displays */
-	amdgpu_dpm_display_configuration_changed(adev);
-
 	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
 	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
 
@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 			mutex_lock(&adev->pm.mutex);
 			adev->pm.dpm.vce_active = true;
 			/* XXX select vce level based on ring/task */
-			adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
 			mutex_unlock(&adev->pm.mutex);
 		} else {
 			mutex_lock(&adev->pm.mutex);
@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 	struct drm_device *ddev = adev->ddev;
 	struct drm_crtc *crtc;
 	struct amdgpu_crtc *amdgpu_crtc;
+	int i = 0;
 
 	if (!adev->pm.dpm_enabled)
 		return;
 
+	amdgpu_display_bandwidth_update(adev);
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (ring && ring->ready)
+			amdgpu_fence_wait_empty(ring);
+	}
+
 	if (adev->pp_enabled) {
-		int i = 0;
-
-		amdgpu_display_bandwidth_update(adev);
-		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-			struct amdgpu_ring *ring = adev->rings[i];
-			if (ring && ring->ready)
-				amdgpu_fence_wait_empty(ring);
-		}
-
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
 	} else {
 		mutex_lock(&adev->pm.mutex);
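
Note on the hunks above: the display-bandwidth update and the wait-for-rings drain now run unconditionally in amdgpu_pm_compute_clocks(), while amdgpu_dpm_change_power_state_locked() delegates the "did anything change" decision to the check_state_equal callback and simply early-outs on equality. A minimal standalone sketch of that control flow (all names and fields hypothetical, plain userspace C, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    struct ps { int sclk, mclk; bool vce_active; };

    /* stand-in for the driver's check_state_equal callback */
    static int check_state_equal(const struct ps *cur, const struct ps *req,
                                 bool *equal)
    {
        *equal = cur->sclk == req->sclk && cur->mclk == req->mclk &&
                 cur->vce_active == req->vce_active;
        return 0;
    }

    static void change_power_state(struct ps *cur, const struct ps *req)
    {
        bool equal;

        /* on callback failure, assume the states differ and reprogram */
        if (check_state_equal(cur, req, &equal))
            equal = false;
        if (equal)
            return; /* nothing changed, skip the reprogramming path */
        *cur = *req;
        printf("reprogrammed to sclk=%d mclk=%d\n", cur->sclk, cur->mclk);
    }

    int main(void)
    {
        struct ps cur = { 300, 150, false }, req = { 600, 300, false };

        change_power_state(&cur, &req); /* states differ: reprograms */
        change_power_state(&cur, &req); /* equal now: early-out */
        return 0;
    }
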
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 7532ff8..fa6baf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
 	return ret;
 }
 
-const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
 	.name = "amdgpu_powerplay",
 	.early_init = amdgpu_pp_early_init,
 	.late_init = amdgpu_pp_late_init,
@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
 	.set_clockgating_state = amdgpu_pp_set_clockgating_state,
 	.set_powergating_state = amdgpu_pp_set_powergating_state,
 };
+
+const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &amdgpu_pp_ip_funcs,
+};
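
The export changes direction here: amdgpu_pp_ip_funcs becomes static and the new amdgpu_pp_ip_block descriptor is what other files consume, so a SoC file can register the block together with an explicit type and version. A rough userspace illustration of the descriptor-table pattern (simplified structs, not the driver's real types):

    #include <stdio.h>

    struct ip_funcs { const char *name; int (*early_init)(void *handle); };
    struct ip_block_version {
        int type, major, minor, rev;
        const struct ip_funcs *funcs;
    };

    static int pp_early_init(void *handle) { (void)handle; return 0; }

    static const struct ip_funcs pp_funcs = { "powerplay", pp_early_init };
    static const struct ip_block_version pp_block = { 1, 1, 0, 0, &pp_funcs };

    /* a SoC init table now lists block descriptors, not bare func tables */
    static const struct ip_block_version *soc_blocks[] = { &pp_block };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(soc_blocks) / sizeof(soc_blocks[0]); i++)
            printf("%s v%d.%d\n", soc_blocks[i]->funcs->name,
                   soc_blocks[i]->major, soc_blocks[i]->minor);
        return 0;
    }
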
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
index da5cf47..c0c4bfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
@@ -23,11 +23,11 @@
  *
  */
 
-#ifndef __AMDGPU_POPWERPLAY_H__
-#define __AMDGPU_POPWERPLAY_H__
+#ifndef __AMDGPU_POWERPLAY_H__
+#define __AMDGPU_POWERPLAY_H__
 
 #include "amd_shared.h"
 
-extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
+extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
 
-#endif /* __AMDSOC_DM_H__ */
+#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3cb5e90..4c99282 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
 {
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
-	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
 
 	/* Make sure we aren't trying to allocate more space
 	 * than the maximum for one submission
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 	int i;
 
 	for (i = 0; i < count; i++)
-		amdgpu_ring_write(ring, ring->nop);
+		amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  */
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & ring->align_mask)
-		ib->ptr[ib->length_dw++] = ring->nop;
+	while (ib->length_dw & ring->funcs->align_mask)
+		ib->ptr[ib->length_dw++] = ring->funcs->nop;
 }
 
 /**
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 	uint32_t count;
 
 	/* We pad to match fetch size */
-	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
-	count %= ring->align_mask + 1;
+	count = ring->funcs->align_mask + 1 -
+		(ring->wptr & ring->funcs->align_mask);
+	count %= ring->funcs->align_mask + 1;
 	ring->funcs->insert_nop(ring, count);
 
 	mb();
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned max_dw, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
-		     enum amdgpu_ring_type ring_type)
+		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
+		     unsigned irq_type)
 {
 	int r;
 
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 
 	ring->ring_size = roundup_pow_of_two(max_dw * 4 *
 					     amdgpu_sched_hw_submission);
-	ring->align_mask = align_mask;
-	ring->nop = nop;
-	ring->type = ring_type;
 
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
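
Since align_mask and nop now live in ring->funcs, the commit-time padding math reads through one more pointer but is otherwise unchanged. A tiny standalone check of that arithmetic (align_mask value picked for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* same padding math as amdgpu_ring_commit(), with align_mask = 7,
     * i.e. the hardware fetches in 8-dword chunks */
    static uint32_t pad_count(uint32_t wptr, uint32_t align_mask)
    {
        uint32_t count = align_mask + 1 - (wptr & align_mask);
        return count % (align_mask + 1); /* 0 when already aligned */
    }

    int main(void)
    {
        printf("%u\n", pad_count(13, 7)); /* 3 NOPs to reach 16 */
        printf("%u\n", pad_count(16, 7)); /* already aligned: 0 */
        return 0;
    }
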
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
new file mode 100644
index 0000000..f2ad49c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_RING_H__
+#define __AMDGPU_RING_H__
+
+#include "gpu_scheduler.h"
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS		16
+#define AMDGPU_MAX_GFX_RINGS		1
+#define AMDGPU_MAX_COMPUTE_RINGS	8
+#define AMDGPU_MAX_VCE_RINGS		3
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
+#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
+
+enum amdgpu_ring_type {
+	AMDGPU_RING_TYPE_GFX,
+	AMDGPU_RING_TYPE_COMPUTE,
+	AMDGPU_RING_TYPE_SDMA,
+	AMDGPU_RING_TYPE_UVD,
+	AMDGPU_RING_TYPE_VCE
+};
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+	uint64_t			gpu_addr;
+	volatile uint32_t		*cpu_addr;
+	/* sync_seq is protected by ring emission lock */
+	uint32_t			sync_seq;
+	atomic_t			last_seq;
+	bool				initialized;
+	struct amdgpu_irq_src		*irq_src;
+	unsigned			irq_type;
+	struct timer_list		fallback_timer;
+	unsigned			num_fences_mask;
+	spinlock_t			lock;
+	struct dma_fence		**fences;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+				  unsigned num_hw_submission);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+				   struct amdgpu_irq_src *irq_src,
+				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+/*
+ * Rings.
+ */
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+	enum amdgpu_ring_type	type;
+	uint32_t		align_mask;
+	u32			nop;
+
+	/* ring read/write ptr handling */
+	u32 (*get_rptr)(struct amdgpu_ring *ring);
+	u32 (*get_wptr)(struct amdgpu_ring *ring);
+	void (*set_wptr)(struct amdgpu_ring *ring);
+	/* validating and patching of IBs */
+	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+	/* constants to calculate how many DW are needed for an emit */
+	unsigned emit_frame_size;
+	unsigned emit_ib_size;
+	/* command emit functions */
+	void (*emit_ib)(struct amdgpu_ring *ring,
+			struct amdgpu_ib *ib,
+			unsigned vm_id, bool ctx_switch);
+	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+			   uint64_t seq, unsigned flags);
+	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+			      uint64_t pd_addr);
+	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+				uint32_t gds_base, uint32_t gds_size,
+				uint32_t gws_base, uint32_t gws_size,
+				uint32_t oa_base, uint32_t oa_size);
+	/* testing functions */
+	int (*test_ring)(struct amdgpu_ring *ring);
+	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
+	/* insert NOP packets */
+	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+	/* pad the indirect buffer to the necessary number of dw */
+	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+	/* note usage for clock and power gating */
+	void (*begin_use)(struct amdgpu_ring *ring);
+	void (*end_use)(struct amdgpu_ring *ring);
+	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+};
+
+struct amdgpu_ring {
+	struct amdgpu_device		*adev;
+	const struct amdgpu_ring_funcs	*funcs;
+	struct amdgpu_fence_driver	fence_drv;
+	struct amd_gpu_scheduler	sched;
+
+	struct amdgpu_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr_offs;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		ring_size;
+	unsigned		max_dw;
+	int			count_dw;
+	uint64_t		gpu_addr;
+	uint32_t		ptr_mask;
+	bool			ready;
+	u32			idx;
+	u32			me;
+	u32			pipe;
+	u32			queue;
+	struct amdgpu_bo	*mqd_obj;
+	u32			doorbell_index;
+	bool			use_doorbell;
+	unsigned		wptr_offs;
+	unsigned		fence_offs;
+	uint64_t		current_ctx;
+	char			name[16];
+	unsigned		cond_exe_offs;
+	u64			cond_exe_gpu_addr;
+	volatile u32		*cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry *ent;
+#endif
+};
+
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
+		     unsigned irq_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+#endif
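
One consequence of this new header: type, align_mask and nop are compile-time constants of the engine, so each IP block can keep them in its static funcs table instead of threading them through amdgpu_ring_init(). A hypothetical initializer showing the shape (field values are illustrative, not taken from any real engine):

    static const struct amdgpu_ring_funcs example_gfx_ring_funcs = {
        .type = AMDGPU_RING_TYPE_GFX,
        .align_mask = 0xff,     /* illustrative */
        .nop = 0x80000000,      /* illustrative NOP packet */
        /* .get_rptr, .get_wptr, .set_wptr, .emit_ib, ... as before */
    };
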
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a..fd26c4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	fence_put(sa_bo->fence);
+	dma_fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
 		if (sa_bo->fence == NULL ||
-		    !fence_is_signaled(sa_bo->fence)) {
+		    !dma_fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct fence **fences,
+				   struct dma_fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
 
-		if (!fence_is_signaled(sa_bo->fence)) {
+		if (!dma_fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
 	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
 	unsigned count;
 	int i, r;
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 
 		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
 			if (fences[i])
-				fences[count++] = fence_get(fences[i]);
+				fences[count++] = dma_fence_get(fences[i]);
 
 		if (count) {
 			spin_unlock(&sa_manager->wq.lock);
-			t = fence_wait_any_timeout(fences, count, false,
-						   MAX_SCHEDULE_TIMEOUT);
+			t = dma_fence_wait_any_timeout(fences, count, false,
+						       MAX_SCHEDULE_TIMEOUT);
 			for (i = 0; i < count; ++i)
-				fence_put(fences[i]);
+				dma_fence_put(fences[i]);
 
 			r = (t > 0) ? 0 : t;
 			spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct fence *fence)
+		       struct dma_fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !fence_is_signaled(fence)) {
+	if (fence && !dma_fence_is_signaled(fence)) {
 		uint32_t idx;
 
-		(*sa_bo)->fence = fence_get(fence);
+		(*sa_bo)->fence = dma_fence_get(fence);
 		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
 		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
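
The rename to dma_fence_* keeps the reference discipline intact: the suballocator still takes a reference on each fence while it holds the lock, drops the lock to wait, then puts every reference it took. A toy refcount model of that pattern (struct dma_fence stubbed out, userspace only):

    #include <stdio.h>

    /* toy refcounted fence, standing in for struct dma_fence */
    struct toy_fence { int refcount; };

    static struct toy_fence *fence_get(struct toy_fence *f)
    {
        if (f)
            f->refcount++;
        return f;
    }

    static void fence_put(struct toy_fence *f)
    {
        if (f && --f->refcount == 0)
            printf("fence freed\n");
    }

    int main(void)
    {
        struct toy_fence f = { .refcount = 1 };
        struct toy_fence *wait_list[1];

        /* under the lock: take a reference so the fence cannot be freed
         * once the lock is dropped for the (potentially long) wait */
        wait_list[0] = fence_get(&f);
        /* ...unlock, dma_fence_wait_any_timeout(), relock... */
        fence_put(wait_list[0]); /* drop our wait reference */
        fence_put(&f);           /* drop the original reference */
        return 0;
    }
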
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d302..ed814e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
 
 struct amdgpu_sync_entry {
 	struct hlist_node	node;
-	struct fence		*fence;
+	struct dma_fence	*fence;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
  *
  * Test if the fence was issued by us.
  */
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+				 struct dma_fence *f)
 {
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
  *
  * Extract who originally created the fence.
  */
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
 {
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
  *
  * Either keep the existing fence or the new one, depending which one is later.
  */
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+				   struct dma_fence *fence)
 {
-	if (*keep && fence_is_later(*keep, fence))
+	if (*keep && dma_fence_is_later(*keep, fence))
 		return;
 
-	fence_put(*keep);
-	*keep = fence_get(fence);
+	dma_fence_put(*keep);
+	*keep = dma_fence_get(fence);
 }
 
 /**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
  * Tries to add the fence to an existing hash entry. Returns true when an entry
  * was found, false otherwise.
  */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
  *
  */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f)
+		      struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return -ENOMEM;
 
 	hash_add(sync->fences, &e->node, f->context);
-	e->fence = fence_get(f);
+	e->fence = dma_fence_get(f);
 	return 0;
 }
 
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     void *owner)
 {
 	struct reservation_object_list *flist;
-	struct fence *f;
+	struct dma_fence *f;
 	void *fence_owner;
 	unsigned i;
 	int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
  * Returns the next fence not signaled yet without removing it from the sync
  * object.
  */
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
-				     struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+					 struct amdgpu_ring *ring)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
 	int i;
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
-		struct fence *f = e->fence;
+		struct dma_fence *f = e->fence;
 		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
 		if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 			 * when they are scheduled.
 			 */
 			if (s_fence->sched == &ring->sched) {
-				if (fence_is_signaled(&s_fence->scheduled))
+				if (dma_fence_is_signaled(&s_fence->scheduled))
 					continue;
 
 				return &s_fence->scheduled;
 			}
 		}
 
-		if (fence_is_signaled(f)) {
+		if (dma_fence_is_signaled(f)) {
 			hash_del(&e->node);
-			fence_put(f);
+			dma_fence_put(f);
 			kmem_cache_free(amdgpu_sync_slab, e);
 			continue;
 		}
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
  *
  * Get and removes the next fence from the sync object not signaled yet.
  */
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
-	struct fence *f;
+	struct dma_fence *f;
 	int i;
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 		hash_del(&e->node);
 		kmem_cache_free(amdgpu_sync_slab, e);
 
-		if (!fence_is_signaled(f))
+		if (!dma_fence_is_signaled(f))
 			return f;
 
-		fence_put(f);
+		dma_fence_put(f);
 	}
 	return NULL;
 }
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 		hash_del(&e->node);
-		fence_put(e->fence);
+		dma_fence_put(e->fence);
 		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
-	fence_put(sync->last_vm_update);
+	dma_fence_put(sync->last_vm_update);
 }
 
 /**
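
Worth noting for the keep-later helper above: because fences on a single context signal in seqno order, the sync object only ever needs to remember the latest fence per context, which is exactly what dma_fence_is_later() lets it decide. A standalone sketch of that invariant (seqno comparison stands in for dma_fence_is_later()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct slot { uint64_t context, seqno; bool used; };

    static void sync_keep_later(struct slot *s, uint64_t context, uint64_t seqno)
    {
        if (s->used && s->seqno >= seqno)
            return; /* the fence we already hold is later */
        s->context = context;
        s->seqno = seqno;
        s->used = true;
    }

    int main(void)
    {
        struct slot s = { 0 };

        sync_keep_later(&s, 1, 5);
        sync_keep_later(&s, 1, 3); /* ignored: older than seqno 5 */
        printf("kept seqno %llu\n", (unsigned long long)s.seqno);
        return 0;
    }
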
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
new file mode 100644
index 0000000..605be26
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_SYNC_H__
+#define __AMDGPU_SYNC_H__
+
+#include <linux/hashtable.h>
+
+struct dma_fence;
+struct reservation_object;
+struct amdgpu_device;
+struct amdgpu_ring;
+
+/*
+ * Container for fences used to sync command submissions.
+ */
+struct amdgpu_sync {
+	DECLARE_HASHTABLE(fences, 4);
+	struct dma_fence	*last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct dma_fence *f);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+		     struct amdgpu_sync *sync,
+		     struct reservation_object *resv,
+		     void *owner);
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+				     struct amdgpu_ring *ring);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b827c75..e05a243 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
-		struct fence *fence = NULL;
+		struct dma_fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		r = fence_wait(fence, false);
+		r = dma_fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		fence_put(fence);
+		dma_fence_put(fence);
 
 		r = amdgpu_bo_kmap(vram_obj, &vram_map);
 		if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		r = fence_wait(fence, false);
+		r = dma_fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		fence_put(fence);
+		dma_fence_put(fence);
 
 		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
@@ -216,7 +216,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			amdgpu_bo_unref(&gtt_obj[i]);
 		}
 		if (fence)
-			fence_put(fence);
+			dma_fence_put(fence);
 		break;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 067e5e6..bb964a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
 			     __field(struct amdgpu_device *, adev)
 			     __field(struct amd_sched_job *, sched_job)
 			     __field(struct amdgpu_ib *, ib)
-			     __field(struct fence *, fence)
+			     __field(struct dma_fence *, fence)
 			     __field(char *, ring_name)
 			     __field(u32, num_ibs)
 			     ),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
 			     __field(struct amdgpu_device *, adev)
 			     __field(struct amd_sched_job *, sched_job)
 			     __field(struct amdgpu_ib *, ib)
-			     __field(struct fence *, fence)
+			     __field(struct dma_fence *, fence)
 			     __field(char *, ring_name)
 			     __field(u32, num_ibs)
 			     ),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691..1821c05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -51,16 +51,6 @@
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
-	struct amdgpu_mman *mman;
-	struct amdgpu_device *adev;
-
-	mman = container_of(bdev, struct amdgpu_mman, bdev);
-	adev = container_of(mman, struct amdgpu_device, mman);
-	return adev;
-}
-
 
 /*
  * Global memory.
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 {
 	struct amdgpu_device *adev;
 
-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
-		man->func = &ttm_bo_manager_func;
+		man->func = &amdgpu_vram_mgr_func;
 		man->gpu_offset = adev->mc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	static struct ttm_place placements = {
 		.fpfn = 0,
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	abo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+		if (adev->mman.buffer_funcs_ring->ready == false) {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				 * allocating address space for the BO.
 				 */
 				abo->placements[i].lpfn =
-					abo->adev->mc.gtt_size >> PAGE_SHIFT;
+					adev->mc.gtt_size >> PAGE_SHIFT;
 			}
 		}
 		break;
@@ -260,63 +251,115 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 	new_mem->mm_node = NULL;
 }
 
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
-			bool evict, bool no_wait_gpu,
-			struct ttm_mem_reg *new_mem,
-			struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+			       struct drm_mm_node *mm_node,
+			       struct ttm_mem_reg *mem,
+			       uint64_t *addr)
 {
-	struct amdgpu_device *adev;
-	struct amdgpu_ring *ring;
-	uint64_t old_start, new_start;
-	struct fence *fence;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
-	ring = adev->mman.buffer_funcs_ring;
-
-	switch (old_mem->mem_type) {
+	switch (mem->mem_type) {
 	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, old_mem);
+		r = amdgpu_ttm_bind(bo, mem);
 		if (r)
 			return r;
 
 	case TTM_PL_VRAM:
-		old_start = (u64)old_mem->start << PAGE_SHIFT;
-		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+		*addr = mm_node->start << PAGE_SHIFT;
+		*addr += bo->bdev->man[mem->mem_type].gpu_offset;
 		break;
 	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		DRM_ERROR("Unknown placement %d\n", mem->mem_type);
 		return -EINVAL;
 	}
-	switch (new_mem->mem_type) {
-	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, new_mem);
-		if (r)
-			return r;
 
-	case TTM_PL_VRAM:
-		new_start = (u64)new_mem->start << PAGE_SHIFT;
-		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-		return -EINVAL;
-	}
+	return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+			    bool evict, bool no_wait_gpu,
+			    struct ttm_mem_reg *new_mem,
+			    struct ttm_mem_reg *old_mem)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+	struct drm_mm_node *old_mm, *new_mm;
+	uint64_t old_start, old_size, new_start, new_size;
+	unsigned long num_pages;
+	struct dma_fence *fence = NULL;
+	int r;
+
+	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
 	if (!ring->ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
 
-	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
-
-	r = amdgpu_copy_buffer(ring, old_start, new_start,
-			       new_mem->num_pages * PAGE_SIZE, /* bytes */
-			       bo->resv, &fence, false);
+	old_mm = old_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
 	if (r)
 		return r;
+	old_size = old_mm->size;
+
+	new_mm = new_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
+	if (r)
+		return r;
+	new_size = new_mm->size;
+
+	num_pages = new_mem->num_pages;
+	while (num_pages) {
+		unsigned long cur_pages = min(old_size, new_size);
+		struct dma_fence *next;
+
+		r = amdgpu_copy_buffer(ring, old_start, new_start,
+				       cur_pages * PAGE_SIZE,
+				       bo->resv, &next, false);
+		if (r)
+			goto error;
+
+		dma_fence_put(fence);
+		fence = next;
+
+		num_pages -= cur_pages;
+		if (!num_pages)
+			break;
+
+		old_size -= cur_pages;
+		if (!old_size) {
+			r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+						&old_start);
+			if (r)
+				goto error;
+			old_size = old_mm->size;
+		} else {
+			old_start += cur_pages * PAGE_SIZE;
+		}
+
+		new_size -= cur_pages;
+		if (!new_size) {
+			r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+						&new_start);
+			if (r)
+				goto error;
+
+			new_size = new_mm->size;
+		} else {
+			new_start += cur_pages * PAGE_SIZE;
+		}
+	}
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
-	fence_put(fence);
+	dma_fence_put(fence);
+	return r;
+
+error:
+	if (fence)
+		dma_fence_wait(fence, false);
+	dma_fence_put(fence);
 	return r;
 }
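
The rewritten blit path is the interesting part of this file: instead of assuming one contiguous region per placement, it walks the drm_mm node lists of both the source and the destination and copies min(bytes left in the current source node, bytes left in the current destination node) per iteration, advancing whichever side ran out. A userspace mock of that loop (page counts instead of byte sizes, node tables chosen so both sides sum to the same total):

    #include <stdio.h>

    struct node { unsigned long start, size; }; /* stand-in for drm_mm_node */

    static void copy_pages(unsigned long src, unsigned long dst, unsigned long n)
    {
        printf("copy %lu pages: %lu -> %lu\n", n, src, dst);
    }

    int main(void)
    {
        struct node src[] = { { 0, 3 }, { 10, 5 } };
        struct node dst[] = { { 100, 4 }, { 200, 4 } };
        unsigned si = 0, di = 0;
        unsigned long ss = src[0].start, sn = src[0].size;
        unsigned long ds = dst[0].start, dn = dst[0].size;
        unsigned long num = 8; /* both node lists cover 8 pages total */

        while (num) {
            unsigned long cur = sn < dn ? sn : dn;

            copy_pages(ss, ds, cur);
            num -= cur;
            if (!num)
                break;

            sn -= cur;
            if (!sn) { /* source node exhausted: move to the next one */
                ++si;
                ss = src[si].start;
                sn = src[si].size;
            } else {
                ss += cur;
            }

            dn -= cur;
            if (!dn) { /* destination node exhausted */
                ++di;
                ds = dst[di].start;
                dn = dst[di].size;
            } else {
                ds += cur;
            }
        }
        return 0;
    }
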
 
@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	struct ttm_placement placement;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	struct ttm_place placements;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 	if (WARN_ON_ONCE(abo->pin_count > 0))
 		return -EINVAL;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 
 	/* remember the eviction */
 	if (evict)
@@ -475,7 +518,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 
 	mem->bus.addr = NULL;
 	mem->bus.offset = 0;
@@ -607,7 +650,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 /* prepare the sg table with the user pages */
 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned nents;
 	int r;
@@ -639,7 +682,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct sg_page_iter sg_iter;
 
@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 	struct amdgpu_device *adev;
 	struct amdgpu_ttm_tt *gtt;
 
-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL) {
@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 		return 0;
 	}
 
-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;
 
-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 
 static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	unsigned i, j;
 
 	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
 
 static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	unsigned log2_size = min(ilog2(tbo->num_pages),
 				 AMDGPU_TTM_LRU_SIZE - 1);
 
@@ -1060,12 +1103,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	return res;
 }
 
+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+					    const struct ttm_place *place)
+{
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
+		unsigned long num_pages = bo->mem.num_pages;
+		struct drm_mm_node *node = bo->mem.mm_node;
+
+		/* Check each drm MM node individually */
+		while (num_pages) {
+			if (place->fpfn < (node->start + node->size) &&
+			    !(place->lpfn && place->lpfn <= node->start))
+				return true;
+
+			num_pages -= node->size;
+			++node;
+		}
+
+		return false;
+	}
+
+	return ttm_bo_eviction_valuable(bo, place);
+}
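
The helper above makes eviction decisions per drm_mm node for split VRAM BOs; the condition is an ordinary half-open interval overlap test. Isolated for clarity (standalone, with lpfn == 0 meaning "no upper bound", as in ttm_place):

    #include <stdbool.h>
    #include <stdio.h>

    /* does the requested placement window [fpfn, lpfn) touch the node
     * [start, start + size)? */
    static bool node_overlaps(unsigned long fpfn, unsigned long lpfn,
                              unsigned long start, unsigned long size)
    {
        return fpfn < start + size && !(lpfn && lpfn <= start);
    }

    int main(void)
    {
        printf("%d\n", node_overlaps(0, 256, 100, 50));  /* 1: overlaps */
        printf("%d\n", node_overlaps(512, 0, 100, 50));  /* 0: node below window */
        return 0;
    }
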
+
 static struct ttm_bo_driver amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
 	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
 	.invalidate_caches = &amdgpu_invalidate_caches,
 	.init_mem_type = &amdgpu_init_mem_type,
+	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
 	.evict_flags = &amdgpu_evict_flags,
 	.move = &amdgpu_bo_move,
 	.verify_access = &amdgpu_verify_access,
@@ -1119,7 +1187,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 
 	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &adev->stollen_vga_memory);
 	if (r) {
 		return r;
@@ -1247,7 +1316,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct fence **fence, bool direct_submit)
+		       struct dma_fence **fence, bool direct_submit)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
@@ -1294,7 +1363,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 	if (direct_submit) {
 		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
 				       NULL, NULL, fence);
-		job->fence = fence_get(*fence);
+		job->fence = dma_fence_get(*fence);
 		if (r)
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		amdgpu_job_free(job);
@@ -1315,9 +1384,9 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		uint32_t src_data,
 		struct reservation_object *resv,
-		struct fence **fence)
+		struct dma_fence **fence)
 {
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_job *job;
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9812c80..98ee384 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -66,6 +66,7 @@ struct amdgpu_mman {
 };
 
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
 int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *tbo,
@@ -77,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct fence **fence, bool direct_submit);
+		       struct dma_fence **fence, bool direct_submit);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 			uint32_t src_data,
 			struct reservation_object *resv,
-			struct fence **fence);
+			struct dma_fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cb3d252..0f0b381 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
 	ucode->mc_addr = mc_addr;
 	ucode->kaddr = kptr;
 
+	if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
+		return 0;
+
 	header = (const struct common_firmware_header *)ucode->fw->data;
 	memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
 		le32_to_cpu(header->ucode_array_offset_bytes)),
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
 	return 0;
 }
 
+static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+				uint64_t mc_addr, void *kptr)
+{
+	const struct gfx_firmware_header_v1_0 *header = NULL;
+	const struct common_firmware_header *comm_hdr = NULL;
+	uint8_t *src_addr = NULL;
+	uint8_t *dst_addr = NULL;
+
+	if (!ucode->fw)
+		return 0;
+
+	comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
+	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+	dst_addr = ucode->kaddr +
+			   ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
+			   PAGE_SIZE);
+	src_addr = (uint8_t *)ucode->fw->data +
+			   le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
+			   (le32_to_cpu(header->jt_offset) * 4);
+	memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
+
+	return 0;
+}
+
 int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 {
 	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 	const struct common_firmware_header *header = NULL;
 
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+				amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				0, NULL, NULL, bo);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
 		goto failed;
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 		goto failed_reserve;
 	}
 
-	err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+	err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				&fw_mc_addr);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
 		goto failed_pin;
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 			header = (const struct common_firmware_header *)ucode->fw->data;
 			amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
 						    fw_buf_ptr + fw_offset);
+			if (i == AMDGPU_UCODE_ID_CP_MEC1) {
+				const struct gfx_firmware_header_v1_0 *cp_hdr;
+				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+				amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
+						    fw_buf_ptr + fw_offset);
+				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+			}
 			fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 		}
 	}
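
For the MEC1 case the jump table is copied a second time, page-aligned, directly behind the microcode image, and the running buffer offset must then skip both regions. The offset bookkeeping, as a standalone check (sizes are illustrative; the real values come from the gfx firmware header):

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
    #define PG              4096UL

    int main(void)
    {
        unsigned long fw_offset = 0;
        unsigned long ucode_size_bytes = 260000; /* illustrative */
        unsigned long jt_size_dw = 256;          /* illustrative */

        /* the microcode image is placed first; the jump table copy
         * lands right behind it, page aligned... */
        unsigned long jt_dst = fw_offset + ALIGN_UP(ucode_size_bytes, PG);

        /* ...and the running offset advances past both regions */
        fw_offset += ALIGN_UP(jt_size_dw * 4, PG);
        fw_offset += ALIGN_UP(ucode_size_bytes, PG);
        printf("jt at %lu, next fw at %lu\n", jt_dst, fw_offset);
        return 0;
    }
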
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index e468be4..a8a4230 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_CP_MEC1,
 	AMDGPU_UCODE_ID_CP_MEC2,
 	AMDGPU_UCODE_ID_RLC_G,
+	AMDGPU_UCODE_ID_STORAGE,
 	AMDGPU_UCODE_ID_MAXIMUM,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281ca..fb270c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
-			struct fence *fence;
+			struct dma_fence *fence;
 
 			r = amdgpu_uvd_get_destroy_msg(ring, handle,
 						       false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 				continue;
 			}
 
-			fence_wait(fence, false);
-			fence_put(fence);
+			dma_fence_wait(fence, false);
+			dma_fence_put(fence);
 
 			adev->uvd.filp[i] = NULL;
 			atomic_set(&adev->uvd.handles[i], 0);
@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
 	int r;
 
+	parser->job->vm = NULL;
+	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
 	if (ib->length_dw % 16) {
 		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
 			  ib->length_dw);
@@ -909,14 +912,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
-			       bool direct, struct fence **fence)
+			       bool direct, struct dma_fence **fence)
 {
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		return r;
 
-	if (!bo->adev->uvd.address_64_bit) {
+	if (!ring->adev->uvd.address_64_bit) {
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
 	}
@@ -960,7 +963,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-		job->fence = fence_get(f);
+		job->fence = dma_fence_get(f);
 		if (r)
 			goto err_free;
 
@@ -975,9 +978,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
 	if (fence)
-		*fence = fence_get(f);
+		*fence = dma_fence_get(f);
 	amdgpu_bo_unref(&bo);
-	fence_put(f);
+	dma_fence_put(f);
 
 	return 0;
 
@@ -993,7 +996,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
    crash the vcpu so just try to emmit a dummy create/destroy msg to
    avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence)
+			      struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &bo);
 	if (r)
 		return r;
@@ -1042,7 +1046,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence)
+			       bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &bo);
 	if (r)
 		return r;
@@ -1128,7 +1133,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
  */
 int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	long r;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1143,7 +1148,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = fence_wait_timeout(fence, false, timeout);
+	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
@@ -1154,7 +1159,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 
-	fence_put(fence);
+	dma_fence_put(fence);
 
 error:
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index c850009..6249ba1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence);
+			      struct dma_fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence);
+			       bool direct, struct dma_fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
 			     struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd8..69b66b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &adev->vce.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -395,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence)
+			      struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	uint64_t dummy;
 	int i, r;
 
@@ -450,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 		ib->ptr[i] = 0x0;
 
 	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-	job->fence = fence_get(f);
+	job->fence = dma_fence_get(f);
 	if (r)
 		goto err;
 
 	amdgpu_job_free(job);
 	if (fence)
-		*fence = fence_get(f);
-	fence_put(f);
+		*fence = dma_fence_get(f);
+	dma_fence_put(f);
 	return 0;
 
 err:
@@ -476,12 +477,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
  * Close up a stream for HW test or if userspace failed to do so
  */
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence)
+			       bool direct, struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	int i, r;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -513,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-		job->fence = fence_get(f);
+		job->fence = dma_fence_get(f);
 		if (r)
 			goto err;
 
@@ -526,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	}
 
 	if (fence)
-		*fence = fence_get(f);
-	fence_put(f);
+		*fence = dma_fence_get(f);
+	dma_fence_put(f);
 	return 0;
 
 err:
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	uint32_t *size = &tmp;
 	int i, r, idx = 0;
 
+	p->job->vm = NULL;
+	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
 	r = amdgpu_cs_sysvm_access_required(p);
 	if (r)
 		return r;
@@ -788,6 +792,96 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 }
 
 /**
+ * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode
+ *
+ * @p: parser context
+ *
+ */
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+	int session_idx = -1;
+	uint32_t destroyed = 0;
+	uint32_t created = 0;
+	uint32_t allocated = 0;
+	uint32_t tmp, handle = 0;
+	int i, r = 0, idx = 0;
+
+	while (idx < ib->length_dw) {
+		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+		if ((len < 8) || (len & 3)) {
+			DRM_ERROR("invalid VCE command length (%d)!\n", len);
+			r = -EINVAL;
+			goto out;
+		}
+
+		switch (cmd) {
+		case 0x00000001: /* session */
+			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+			session_idx = amdgpu_vce_validate_handle(p, handle,
+								 &allocated);
+			if (session_idx < 0) {
+				r = session_idx;
+				goto out;
+			}
+			break;
+
+		case 0x01000001: /* create */
+			created |= 1 << session_idx;
+			if (destroyed & (1 << session_idx)) {
+				destroyed &= ~(1 << session_idx);
+				allocated |= 1 << session_idx;
+
+			} else if (!(allocated & (1 << session_idx))) {
+				DRM_ERROR("Handle already in use!\n");
+				r = -EINVAL;
+				goto out;
+			}
+
+			break;
+
+		case 0x02000001: /* destroy */
+			destroyed |= 1 << session_idx;
+			break;
+
+		default:
+			break;
+		}
+
+		if (session_idx == -1) {
+			DRM_ERROR("no session command at start of IB\n");
+			r = -EINVAL;
+			goto out;
+		}
+
+		idx += len / 4;
+	}
+
+	if (allocated & ~created) {
+		DRM_ERROR("New session without create command!\n");
+		r = -ENOENT;
+	}
+
+out:
+	if (!r) {
+		/* No error, free all destroyed handle slots */
+		tmp = destroyed;
+		amdgpu_ib_free(p->adev, ib, NULL);
+	} else {
+		/* Error during parsing, free all allocated handle slots */
+		tmp = allocated;
+	}
+
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+		if (tmp & (1 << i))
+			atomic_set(&p->adev->vce.handles[i], 0);
+
+	return r;
+}
+
+/**
  * amdgpu_vce_ring_emit_ib - execute indirect buffer
  *
  * @ring: engine to use
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* amdgpu_vce_ring_emit_fence  x1 no user fence */
-}
-
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
@@ -883,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
  */
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	long r;
 
 	/* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -902,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = fence_wait_timeout(fence, false, timeout);
+	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
@@ -913,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 error:
-	fence_put(fence);
+	dma_fence_put(fence);
 	return r;
 }
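
A note on amdgpu_vce_ring_parse_cs_vm() above: session lifetime is tracked with three bitmasks, and a destroy followed by a create on the same slot within one IB revives the slot rather than failing it. The transitions, reduced to a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t created = 0, destroyed = 0, allocated = 0;
        uint32_t bit = 1u << 2; /* session slot 2, illustrative */

        allocated |= bit;       /* session command claimed a slot */
        destroyed |= bit;       /* destroy command queued it for release */

        /* a later create on the same slot revives it, as in the parser */
        created |= bit;
        if (destroyed & bit) {
            destroyed &= ~bit;
            allocated |= bit;
        }

        /* final consistency check from the end of the parser */
        if (allocated & ~created)
            printf("error: new session without create (-ENOENT)\n");
        else
            printf("ok: mask 0x%x freed on success\n", destroyed);
        return 0;
    }
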
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 12729d2..d98041f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,11 +29,12 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence);
+			      struct dma_fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence);
+			       bool direct, struct dma_fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
 			     unsigned vm_id, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06f2432..e480263 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,7 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
  *
  * @adev: amdgpu device pointer
  * @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
  *
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if necessary.
  */
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			      int (*validate)(void *p, struct amdgpu_bo *bo),
+			      void *param)
 {
 	uint64_t num_evictions;
 	unsigned i;
+	int r;
 
 	/* We only need to validate the page tables
 	 * if they aren't already valid.
 	 */
 	num_evictions = atomic64_read(&adev->num_evictions);
 	if (num_evictions == vm->last_eviction_counter)
-		return;
+		return 0;
 
 	/* add the vm page table to the list */
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		list_add(&entry->tv.head, duplicates);
+		r = validate(param, bo);
+		if (r)
+			return r;
 	}
 
+	return 0;
 }
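
The interface change above inverts control: instead of exporting its page-table BOs on a duplicates list for the CS code to validate later, the VM now walks them itself and calls back into the submitter. The shape of that pattern, standalone:

    #include <stdio.h>

    struct bo { int id; };

    /* iterate page-table BOs and let the caller decide how to validate,
     * mirroring the callback shape of amdgpu_vm_validate_pt_bos() */
    static int for_each_pt_bo(struct bo *tables, unsigned n,
                              int (*validate)(void *param, struct bo *bo),
                              void *param)
    {
        unsigned i;
        int r;

        for (i = 0; i < n; ++i) {
            r = validate(param, &tables[i]);
            if (r)
                return r;
        }
        return 0;
    }

    static int print_validate(void *param, struct bo *bo)
    {
        (void)param;
        printf("validating bo %d\n", bo->id);
        return 0;
    }

    int main(void)
    {
        struct bo tables[] = { { 0 }, { 1 } };

        return for_each_pt_bo(tables, 2, print_validate, NULL);
    }
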
 
 /**
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+		ttm_bo_move_to_lru_tail(&bo->tbo);
 	}
 	spin_unlock(&glob->lru_lock);
 }
@@ -194,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
+		      struct amdgpu_sync *sync, struct dma_fence *fence,
 		      struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t fence_context = adev->fence_context + ring->idx;
-	struct fence *updates = sync->last_vm_update;
+	struct dma_fence *updates = sync->last_vm_update;
 	struct amdgpu_vm_id *id, *idle;
-	struct fence **fences;
+	struct dma_fence **fences;
 	unsigned i;
 	int r = 0;
 
@@ -225,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (&idle->list == &adev->vm_manager.ids_lru) {
 		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
 		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-		struct fence_array *array;
+		struct dma_fence_array *array;
 		unsigned j;
 
 		for (j = 0; j < i; ++j)
-			fence_get(fences[j]);
+			dma_fence_get(fences[j]);
 
-		array = fence_array_create(i, fences, fence_context,
+		array = dma_fence_array_create(i, fences, fence_context,
 					   seqno, true);
 		if (!array) {
 			for (j = 0; j < i; ++j)
-				fence_put(fences[j]);
+				dma_fence_put(fences[j]);
 			kfree(fences);
 			r = -ENOMEM;
 			goto error;
@@ -243,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 
 		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
-		fence_put(&array->base);
+		dma_fence_put(&array->base);
 		if (r)
 			goto error;
 
@@ -257,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	/* Check if we can use a VMID already assigned to this VM */
 	i = ring->idx;
 	do {
-		struct fence *flushed;
+		struct dma_fence *flushed;
 
 		id = vm->ids[i++];
 		if (i == AMDGPU_MAX_RINGS)
@@ -279,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			continue;
 
 		if (id->last_flush->context != fence_context &&
-		    !fence_is_signaled(id->last_flush))
+		    !dma_fence_is_signaled(id->last_flush))
 			continue;
 
 		flushed  = id->flushed_updates;
 		if (updates &&
-		    (!flushed || fence_is_later(updates, flushed)))
+		    (!flushed || dma_fence_is_later(updates, flushed)))
 			continue;
 
 		/* Good we can use this VMID. Remember this submission as
@@ -315,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (r)
 		goto error;
 
-	fence_put(id->first);
-	id->first = fence_get(fence);
+	dma_fence_put(id->first);
+	id->first = dma_fence_get(fence);
 
-	fence_put(id->last_flush);
+	dma_fence_put(id->last_flush);
 	id->last_flush = NULL;
 
-	fence_put(id->flushed_updates);
-	id->flushed_updates = fence_get(updates);
+	dma_fence_put(id->flushed_updates);
+	id->flushed_updates = dma_fence_get(updates);
 
 	id->pd_gpu_addr = job->vm_pd_addr;
 	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@@ -341,9 +346,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	const struct amdgpu_ip_block_version *ip_block;
+	const struct amdgpu_ip_block *ip_block;
 
-	if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
 		/* only compute rings */
 		return false;
 
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
 	if (!ip_block)
 		return false;
 
-	if (ip_block->major <= 7) {
+	if (ip_block->version->major <= 7) {
 		/* gfx7 has no workaround */
 		return true;
-	} else if (ip_block->major == 8) {
+	} else if (ip_block->version->major == 8) {
 		if (adev->gfx.mec_fw_version >= 673)
 			/* gfx8 is fixed in MEC firmware 673 */
 			return false;
@@ -393,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 
 	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
 	    amdgpu_vm_is_gpu_reset(adev, id))) {
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
 		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -403,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 			return r;
 
 		mutex_lock(&adev->vm_manager.lock);
-		fence_put(id->last_flush);
+		dma_fence_put(id->last_flush);
 		id->last_flush = fence;
 		mutex_unlock(&adev->vm_manager.lock);
 	}
@@ -537,7 +542,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
 	unsigned entries;
@@ -578,7 +583,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		goto error_free;
 
 	amdgpu_bo_fence(bo, fence, true);
-	fence_put(fence);
+	dma_fence_put(fence);
 	return 0;
 
 error_free:
@@ -612,123 +617,6 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 	return result;
 }
 
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
-					 struct amdgpu_vm *vm,
-					 bool shadow)
-{
-	struct amdgpu_ring *ring;
-	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
-		vm->page_directory;
-	uint64_t pd_addr;
-	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
-	uint64_t last_pde = ~0, last_pt = ~0;
-	unsigned count = 0, pt_idx, ndw;
-	struct amdgpu_job *job;
-	struct amdgpu_pte_update_params params;
-	struct fence *fence = NULL;
-
-	int r;
-
-	if (!pd)
-		return 0;
-
-	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
-	if (r)
-		return r;
-
-	pd_addr = amdgpu_bo_gpu_offset(pd);
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-
-	/* padding, etc. */
-	ndw = 64;
-
-	/* assume the worst case */
-	ndw += vm->max_pde_used * 6;
-
-	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
-	if (r)
-		return r;
-
-	memset(&params, 0, sizeof(params));
-	params.adev = adev;
-	params.ib = &job->ibs[0];
-
-	/* walk over the address space and update the page directory */
-	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
-		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
-		uint64_t pde, pt;
-
-		if (bo == NULL)
-			continue;
-
-		if (bo->shadow) {
-			struct amdgpu_bo *shadow = bo->shadow;
-
-			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
-			if (r)
-				return r;
-		}
-
-		pt = amdgpu_bo_gpu_offset(bo);
-		if (!shadow) {
-			if (vm->page_tables[pt_idx].addr == pt)
-				continue;
-			vm->page_tables[pt_idx].addr = pt;
-		} else {
-			if (vm->page_tables[pt_idx].shadow_addr == pt)
-				continue;
-			vm->page_tables[pt_idx].shadow_addr = pt;
-		}
-
-		pde = pd_addr + pt_idx * 8;
-		if (((last_pde + 8 * count) != pde) ||
-		    ((last_pt + incr * count) != pt) ||
-		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
-
-			if (count) {
-				amdgpu_vm_do_set_ptes(&params, last_pde,
-						      last_pt, count, incr,
-						      AMDGPU_PTE_VALID);
-			}
-
-			count = 1;
-			last_pde = pde;
-			last_pt = pt;
-		} else {
-			++count;
-		}
-	}
-
-	if (count)
-		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
-				      count, incr, AMDGPU_PTE_VALID);
-
-	if (params.ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, params.ib);
-		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
-				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(params.ib->length_dw > ndw);
-		r = amdgpu_job_submit(job, ring, &vm->entity,
-				      AMDGPU_FENCE_OWNER_VM, &fence);
-		if (r)
-			goto error_free;
-
-		amdgpu_bo_fence(pd, fence, true);
-		fence_put(vm->page_directory_fence);
-		vm->page_directory_fence = fence_get(fence);
-		fence_put(fence);
-
-	} else {
-		amdgpu_job_free(job);
-	}
-
-	return 0;
-
-error_free:
-	amdgpu_job_free(job);
-	return r;
-}
-
 /*
  * amdgpu_vm_update_pdes - make sure that page directory is valid
  *
@@ -742,14 +630,135 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
  * Returns 0 for success, error for failure.
  */
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                   struct amdgpu_vm *vm)
+				    struct amdgpu_vm *vm)
 {
+	struct amdgpu_bo *shadow;
+	struct amdgpu_ring *ring;
+	uint64_t pd_addr, shadow_addr;
+	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
+	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
+	unsigned count = 0, pt_idx, ndw;
+	struct amdgpu_job *job;
+	struct amdgpu_pte_update_params params;
+	struct dma_fence *fence = NULL;
+
 	int r;
 
-	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	shadow = vm->page_directory->shadow;
+
+	/* padding, etc. */
+	ndw = 64;
+
+	/* assume the worst case */
+	ndw += vm->max_pde_used * 6;
+
+	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+	if (shadow) {
+		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+		if (r)
+			return r;
+		shadow_addr = amdgpu_bo_gpu_offset(shadow);
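+		/* every PD update is also written to the shadow */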
+		ndw *= 2;
+	} else {
+		shadow_addr = 0;
+	}
+
+	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 	if (r)
 		return r;
-	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.ib = &job->ibs[0];
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
+		uint64_t pde, pt;
+
+		if (bo == NULL)
+			continue;
+
+		if (bo->shadow) {
+			struct amdgpu_bo *pt_shadow = bo->shadow;
+
+			r = amdgpu_ttm_bind(&pt_shadow->tbo,
+					    &pt_shadow->tbo.mem);
+			if (r)
+				return r;
+		}
+
+		pt = amdgpu_bo_gpu_offset(bo);
+		if (vm->page_tables[pt_idx].addr == pt)
+			continue;
+
+		vm->page_tables[pt_idx].addr = pt;
+
+		pde = pd_addr + pt_idx * 8;
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt) ||
+		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
+
+			if (count) {
+				if (shadow)
+					amdgpu_vm_do_set_ptes(&params,
+							      last_shadow,
+							      last_pt, count,
+							      incr,
+							      AMDGPU_PTE_VALID);
+
+				amdgpu_vm_do_set_ptes(&params, last_pde,
+						      last_pt, count, incr,
+						      AMDGPU_PTE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_shadow = shadow_addr + pt_idx * 8;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count) {
+		if (vm->page_directory->shadow)
+			amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+					      count, incr, AMDGPU_PTE_VALID);
+
+		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+				      count, incr, AMDGPU_PTE_VALID);
+	}
+
+	if (params.ib->length_dw == 0) {
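+		/* the page directory was already up to date */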
+		amdgpu_job_free(job);
+		return 0;
+	}
+
+	amdgpu_ring_pad_ib(ring, params.ib);
+	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+			 AMDGPU_FENCE_OWNER_VM);
+	if (shadow)
+		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+				 AMDGPU_FENCE_OWNER_VM);
+
+	WARN_ON(params.ib->length_dw > ndw);
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &fence);
+	if (r)
+		goto error_free;
+
+	amdgpu_bo_fence(vm->page_directory, fence, true);
+	dma_fence_put(vm->page_directory_fence);
+	vm->page_directory_fence = dma_fence_get(fence);
+	dma_fence_put(fence);
+
+	return 0;
+
+error_free:
+	amdgpu_job_free(job);
+	return r;
 }
 
 /**
@@ -781,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* initialize the variables */
 	addr = start;
 	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = vm->page_tables[pt_idx].entry.robj;
+	pt = vm->page_tables[pt_idx].bo;
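+	/* shadow updates are only possible for PTs with a shadow BO */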
 	if (params->shadow) {
 		if (!pt->shadow)
 			return;
-		pt = vm->page_tables[pt_idx].entry.robj->shadow;
+		pt = pt->shadow;
 	}
 	if ((addr & ~mask) == (end & ~mask))
 		nptes = end - addr;
@@ -804,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* walk over the address space and update the page tables */
 	while (addr < end) {
 		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = vm->page_tables[pt_idx].entry.robj;
+		pt = vm->page_tables[pt_idx].bo;
 		if (params->shadow) {
 			if (!pt->shadow)
 				return;
-			pt = vm->page_tables[pt_idx].entry.robj->shadow;
+			pt = pt->shadow;
 		}
 
 		if ((addr & ~mask) == (end & ~mask))
@@ -929,20 +938,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-				       struct fence *exclusive,
+				       struct dma_fence *exclusive,
 				       uint64_t src,
 				       dma_addr_t *pages_addr,
 				       struct amdgpu_vm *vm,
 				       uint64_t start, uint64_t last,
 				       uint32_t flags, uint64_t addr,
-				       struct fence **fence)
+				       struct dma_fence **fence)
 {
 	struct amdgpu_ring *ring;
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	int r;
 
 	memset(&params, 0, sizeof(params));
@@ -1045,10 +1054,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	amdgpu_bo_fence(vm->page_directory, f, true);
 	if (fence) {
-		fence_put(*fence);
-		*fence = fence_get(f);
+		dma_fence_put(*fence);
+		*fence = dma_fence_get(f);
 	}
-	fence_put(f);
+	dma_fence_put(f);
 	return 0;
 
 error_free:
@@ -1065,8 +1074,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
  * @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
  * @fence: optional resulting fence
  *
  * Split the mapping into smaller chunks so that each update fits
@@ -1074,17 +1083,16 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-				      struct fence *exclusive,
+				      struct dma_fence *exclusive,
 				      uint32_t gtt_flags,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
-				      uint32_t flags, uint64_t addr,
-				      struct fence **fence)
+				      uint32_t flags,
+				      struct drm_mm_node *nodes,
+				      struct dma_fence **fence)
 {
-	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
-	uint64_t src = 0, start = mapping->it.start;
+	uint64_t pfn, src = 0, start = mapping->it.start;
 	int r;
 
 	/* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
@@ -1097,23 +1105,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 
 	trace_amdgpu_vm_bo_update(mapping);
 
-	if (pages_addr) {
-		if (flags == gtt_flags)
-			src = adev->gart.table_addr + (addr >> 12) * 8;
-		addr = 0;
+	pfn = mapping->offset >> PAGE_SHIFT;
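+	/* advance to the drm_mm node that backs the start of the mapping */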
+	if (nodes) {
+		while (pfn >= nodes->size) {
+			pfn -= nodes->size;
+			++nodes;
+		}
 	}
-	addr += mapping->offset;
 
-	if (!pages_addr || src)
-		return amdgpu_vm_bo_update_mapping(adev, exclusive,
-						   src, pages_addr, vm,
-						   start, mapping->it.last,
-						   flags, addr, fence);
+	do {
+		uint64_t max_entries;
+		uint64_t addr, last;
 
-	while (start != mapping->it.last + 1) {
-		uint64_t last;
+		if (nodes) {
+			addr = nodes->start << PAGE_SHIFT;
+			max_entries = (nodes->size - pfn) *
+				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+		} else {
+			addr = 0;
+			max_entries = S64_MAX;
+		}
 
-		last = min((uint64_t)mapping->it.last, start + max_size - 1);
+		if (pages_addr) {
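+			/* either copy the PTEs straight out of the GART
+			 * table or write them in bounded batches
+			 */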
+			if (flags == gtt_flags)
+				src = adev->gart.table_addr +
+					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+			else
+				max_entries = min(max_entries, 16ull * 1024ull);
+			addr = 0;
+		} else if (flags & AMDGPU_PTE_VALID) {
+			addr += adev->vm_manager.vram_base_offset;
+		}
+		addr += pfn << PAGE_SHIFT;
+
+		last = min((uint64_t)mapping->it.last, start + max_entries - 1);
 		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
 						src, pages_addr, vm,
 						start, last, flags, addr,
@@ -1121,9 +1146,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		if (r)
 			return r;
 
+		pfn += last - start + 1;
+		if (nodes && nodes->size == pfn) {
+			pfn = 0;
+			++nodes;
+		}
 		start = last + 1;
-		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
-	}
+
+	} while (unlikely(start != mapping->it.last + 1));
 
 	return 0;
 }
@@ -1147,40 +1177,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	dma_addr_t *pages_addr = NULL;
 	uint32_t gtt_flags, flags;
 	struct ttm_mem_reg *mem;
-	struct fence *exclusive;
-	uint64_t addr;
+	struct drm_mm_node *nodes;
+	struct dma_fence *exclusive;
 	int r;
 
 	if (clear) {
 		mem = NULL;
-		addr = 0;
+		nodes = NULL;
 		exclusive = NULL;
 	} else {
 		struct ttm_dma_tt *ttm;
 
 		mem = &bo_va->bo->tbo.mem;
-		addr = (u64)mem->start << PAGE_SHIFT;
-		switch (mem->mem_type) {
-		case TTM_PL_TT:
+		nodes = mem->mm_node;
+		if (mem->mem_type == TTM_PL_TT) {
 			ttm = container_of(bo_va->bo->tbo.ttm, struct
 					   ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
-			break;
-
-		case TTM_PL_VRAM:
-			addr += adev->vm_manager.vram_base_offset;
-			break;
-
-		default:
-			break;
 		}
-
 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-		adev == bo_va->bo->adev) ? flags : 0;
+		adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1210,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
 					       gtt_flags, pages_addr, vm,
-					       mapping, flags, addr,
+					       mapping, flags, nodes,
 					       &bo_va->last_pt_update);
 		if (r)
 			return r;
@@ -1405,18 +1425,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
-		struct amdgpu_bo_list_entry *entry;
 		struct amdgpu_bo *pt;
 
-		entry = &vm->page_tables[pt_idx].entry;
-		if (entry->robj)
+		if (vm->page_tables[pt_idx].bo)
 			continue;
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-				     AMDGPU_GEM_CREATE_SHADOW,
+				     AMDGPU_GEM_CREATE_SHADOW |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, resv, &pt);
 		if (r)
 			goto error_free;
@@ -1442,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 			}
 		}
 
-		entry->robj = pt;
-		entry->priority = 0;
-		entry->tv.bo = &entry->robj->tbo;
-		entry->tv.shared = true;
-		entry->user_pages = NULL;
+		vm->page_tables[pt_idx].bo = pt;
 		vm->page_tables[pt_idx].addr = 0;
 	}
 
@@ -1547,7 +1562,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		kfree(mapping);
 	}
 
-	fence_put(bo_va->last_pt_update);
+	dma_fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
 }
 
@@ -1626,7 +1641,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-			     AMDGPU_GEM_CREATE_SHADOW,
+			     AMDGPU_GEM_CREATE_SHADOW |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		goto error_free_sched_entity;
@@ -1697,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+		struct amdgpu_bo *pt = vm->page_tables[i].bo;
 
 		if (!pt)
 			continue;
@@ -1709,7 +1725,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
-	fence_put(vm->page_directory_fence);
+	dma_fence_put(vm->page_directory_fence);
 }
 
 /**
@@ -1733,7 +1749,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 			      &adev->vm_manager.ids_lru);
 	}
 
-	adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+	adev->vm_manager.fence_context =
+		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		adev->vm_manager.seqno[i] = 0;
 
@@ -1755,8 +1772,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
 		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
 
-		fence_put(adev->vm_manager.ids[i].first);
+		dma_fence_put(adev->vm_manager.ids[i].first);
 		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
-		fence_put(id->flushed_updates);
+		dma_fence_put(id->flushed_updates);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
new file mode 100644
index 0000000..adbc2f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM	16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID	(1 << 0)
+#define AMDGPU_PTE_SYSTEM	(1 << 1)
+#define AMDGPU_PTE_SNOOPED	(1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE	(1 << 4)
+
+#define AMDGPU_PTE_READABLE	(1 << 5)
+#define AMDGPU_PTE_WRITEABLE	(1 << 6)
+
+#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
+
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER	0
+#define AMDGPU_VM_FAULT_STOP_FIRST	1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
+
+struct amdgpu_vm_pt {
+	struct amdgpu_bo	*bo;
+	uint64_t		addr;
+};
+
+struct amdgpu_vm {
+	/* tree of virtual addresses mapped */
+	struct rb_root		va;
+
+	/* protecting invalidated */
+	spinlock_t		status_lock;
+
+	/* BOs moved, but not yet updated in the PT */
+	struct list_head	invalidated;
+
+	/* BOs cleared in the PT because of a move */
+	struct list_head	cleared;
+
+	/* BO mappings freed, but not yet updated in the PT */
+	struct list_head	freed;
+
+	/* contains the page directory */
+	struct amdgpu_bo	*page_directory;
+	unsigned		max_pde_used;
+	struct dma_fence	*page_directory_fence;
+	uint64_t		last_eviction_counter;
+
+	/* array of page tables, one for each page directory entry */
+	struct amdgpu_vm_pt	*page_tables;
+
+	/* for id and flush management per ring */
+	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
+
+	/* protecting freed */
+	spinlock_t		freed_lock;
+
+	/* Scheduler entity for page table updates */
+	struct amd_sched_entity	entity;
+
+	/* client id */
+	u64                     client_id;
+};
+
+struct amdgpu_vm_id {
+	struct list_head	list;
+	struct dma_fence	*first;
+	struct amdgpu_sync	active;
+	struct dma_fence	*last_flush;
+	atomic64_t		owner;
+
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct dma_fence	*flushed_updates;
+
+	uint32_t                current_gpu_reset_count;
+
+	uint32_t		gds_base;
+	uint32_t		gds_size;
+	uint32_t		gws_base;
+	uint32_t		gws_size;
+	uint32_t		oa_base;
+	uint32_t		oa_size;
+};
+
+struct amdgpu_vm_manager {
+	/* Handling of VMIDs */
+	struct mutex				lock;
+	unsigned				num_ids;
+	struct list_head			ids_lru;
+	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
+
+	/* Handling of VM fences */
+	u64					fence_context;
+	unsigned				seqno[AMDGPU_MAX_RINGS];
+
+	uint32_t				max_pfn;
+	/* vram base address for page table entry  */
+	u64					vram_base_offset;
+	/* is vm enabled? */
+	bool					enabled;
+	/* vm pte handling */
+	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
+	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
+	unsigned				vm_pte_num_rings;
+	atomic_t				vm_pte_next_ring;
+	/* client id counter */
+	atomic64_t				client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+			 struct list_head *validated,
+			 struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			      int (*callback)(void *p, struct amdgpu_bo *bo),
+			      void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+				  struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync, struct dma_fence *fence,
+		      struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
new file mode 100644
index 0000000..180eed7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_vram_mgr {
+	struct drm_mm mm;
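+	/* protects the drm_mm against concurrent allocations */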
+	spinlock_t lock;
+};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of VRAM
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
+				unsigned long p_size)
+{
+	struct amdgpu_vram_mgr *mgr;
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return -ENOMEM;
+
+	drm_mm_init(&mgr->mm, 0, p_size);
+	spin_lock_init(&mgr->lock);
+	man->priv = mgr;
+	return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	if (!drm_mm_clean(&mgr->mm)) {
+		spin_unlock(&mgr->lock);
+		return -EBUSY;
+	}
+
+	drm_mm_takedown(&mgr->mm);
+	spin_unlock(&mgr->lock);
+	kfree(mgr);
+	man->priv = NULL;
+	return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate VRAM for the given BO.
+ */
+static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *tbo,
+			       const struct ttm_place *place,
+			       struct ttm_mem_reg *mem)
+{
+	struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
+	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct drm_mm *mm = &mgr->mm;
+	struct drm_mm_node *nodes;
+	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
+	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+	unsigned i;
+	int r;
+
+	lpfn = place->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+
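+	/* contiguous BOs get a single node, everything else is split
+	 * into chunks of at most pages_per_node pages
+	 */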
+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
+	    amdgpu_vram_page_split == -1) {
+		pages_per_node = ~0ul;
+		num_nodes = 1;
+	} else {
+		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
+				     mem->page_alignment);
+		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+	}
+
+	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
+
+	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+		sflags = DRM_MM_SEARCH_BELOW;
+		aflags = DRM_MM_CREATE_TOP;
+	}
+
+	pages_left = mem->num_pages;
+
+	spin_lock(&mgr->lock);
+	for (i = 0; i < num_nodes; ++i) {
+		unsigned long pages = min(pages_left, pages_per_node);
+		uint32_t alignment = mem->page_alignment;
+
+		if (pages == pages_per_node)
+			alignment = pages_per_node;
+		else
+			sflags |= DRM_MM_SEARCH_BEST;
+
+		r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
+							alignment, 0,
+							place->fpfn, lpfn,
+							sflags, aflags);
+		if (unlikely(r))
+			goto error;
+
+		pages_left -= pages;
+	}
+	spin_unlock(&mgr->lock);
+
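+	/* only a single node has a meaningful linear start address */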
+	mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
+	mem->mm_node = nodes;
+
+	return 0;
+
+error:
+	while (i--)
+		drm_mm_remove_node(&nodes[i]);
+	spin_unlock(&mgr->lock);
+
+	kfree(nodes);
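+	/* returning 0 without setting mem->mm_node signals "no space",
+	 * so TTM can try eviction instead of failing hard
+	 */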
+	return r == -ENOSPC ? 0 : r;
+}
+
+/**
+ * amdgpu_vram_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Free the allocated VRAM again.
+ */
+static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct drm_mm_node *nodes = mem->mm_node;
+	unsigned pages = mem->num_pages;
+
+	if (!mem->mm_node)
+		return;
+
+	spin_lock(&mgr->lock);
+	while (pages) {
+		pages -= nodes->size;
+		drm_mm_remove_node(nodes);
+		++nodes;
+	}
+	spin_unlock(&mgr->lock);
+
+	kfree(mem->mm_node);
+	mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_vram_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+				  const char *prefix)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	drm_mm_debug_table(&mgr->mm, prefix);
+	spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
+	.init		= amdgpu_vram_mgr_init,
+	.takedown	= amdgpu_vram_mgr_fini,
+	.get_node	= amdgpu_vram_mgr_new,
+	.put_node	= amdgpu_vram_mgr_del,
+	.debug		= amdgpu_vram_mgr_debug
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f..8c9bc75 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -31,6 +31,7 @@
 #include "atom.h"
 #include "atom-bits.h"
 #include "atombios_encoders.h"
+#include "atombios_crtc.h"
 #include "amdgpu_atombios.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1d8c375..e9b1964 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 
-	if (pi->uvd_power_gated == gate)
-		return;
-
 	pi->uvd_power_gated = gate;
 
 	ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
 		sclk = ps->performance_levels[0].sclk;
 	}
 
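+	/* honour the minimum clocks requested by the display configuration */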
+	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+		sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
 	if (rps->vce_active) {
 		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
 			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
 	struct ci_power_info *pi = ci_get_pi(adev);
 	int i, ret;
 
+	if (amdgpu_ci_is_smc_running(adev)) {
+		DRM_INFO("smc is running, no need to load smc firmware\n");
+		return 0;
+	}
+
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
 			break;
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 	u32 tmp;
+	int ret = 0;
 
 	if (!gate) {
+		/* turn the clocks on when decoding */
+		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						    AMD_CG_STATE_UNGATE);
+		if (ret)
+			return ret;
+
 		if (pi->caps_uvd_dpm ||
 		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
 			pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
 		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
 		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
 		WREG32_SMC(ixDPM_TABLE_475, tmp);
+		ret = ci_enable_uvd_dpm(adev, true);
+	} else {
+		ret = ci_enable_uvd_dpm(adev, false);
+		if (ret)
+			return ret;
+
+		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						    AMD_CG_STATE_GATE);
 	}
 
-	return ci_enable_uvd_dpm(adev, !gate);
+	return ret;
 }
 
 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 
 			ret = ci_enable_vce_dpm(adev, true);
 		} else {
+			ret = ci_enable_vce_dpm(adev, false);
+			if (ret)
+				return ret;
 			/* turn the clocks off when not encoding */
 			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
 							    AMD_CG_STATE_GATE);
-			if (ret)
-				return ret;
-
-			ret = ci_enable_vce_dpm(adev, false);
 		}
 	}
 	return ret;
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
 	pi->current_rps = *rps;
 	pi->current_ps = *new_ps;
 	pi->current_rps.ps_priv = &pi->current_ps;
+	adev->pm.dpm.current_ps = &pi->current_rps;
 }
 
 static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
 	pi->requested_rps = *rps;
 	pi->requested_ps = *new_ps;
 	pi->requested_rps.ps_priv = &pi->requested_ps;
+	adev->pm.dpm.requested_ps = &pi->requested_rps;
 }
 
 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
 	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
 	int ret;
 
-	if (amdgpu_ci_is_smc_running(adev))
-		return -EINVAL;
 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
 		ci_enable_voltage_control(adev);
 		ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
 	adev->pm.dpm.num_ps = state_array->ucNumEntries;
 
 	/* fill in the vce power states */
-	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 		u32 sclk, mclk;
 		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
 		clock_info = (union pplib_clock_info *)
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
 	amdgpu_dpm_print_ps_status(adev, rps);
 }
 
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+						const struct ci_pl *ci_cpl2)
+{
+	return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+		  (ci_cpl1->sclk == ci_cpl2->sclk) &&
+		  (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+		  (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	struct ci_ps *ci_cps;
+	struct ci_ps *ci_rps;
+	int i;
+
+	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+		return -EINVAL;
+
+	ci_cps = ci_get_ps(cps);
+	ci_rps = ci_get_ps(rps);
+
+	if (ci_cps == NULL) {
+		*equal = false;
+		return 0;
+	}
+
+	if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < ci_cps->performance_level_count; i++) {
+		if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+					&(ci_rps->performance_levels[i]))) {
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+	return 0;
+}
+
 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
@@ -6287,12 +6359,19 @@ static int ci_dpm_suspend(void *handle)
 
 	if (adev->pm.dpm_enabled) {
 		mutex_lock(&adev->pm.mutex);
-		/* disable dpm */
-		ci_dpm_disable(adev);
-		/* reset the power state */
-		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
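+		/* drop the thermal interrupts and request the boot state
+		 * instead of tearing down DPM completely
+		 */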
+		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+		adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+		adev->pm.dpm.last_state = adev->pm.dpm.state;
+		adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+		adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
 		mutex_unlock(&adev->pm.mutex);
+		amdgpu_pm_compute_clocks(adev);
 	}
+
 	return 0;
 }
 
@@ -6310,6 +6389,8 @@ static int ci_dpm_resume(void *handle)
 			adev->pm.dpm_enabled = false;
 		else
 			adev->pm.dpm_enabled = true;
+		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+		adev->pm.dpm.state = adev->pm.dpm.last_state;
 		mutex_unlock(&adev->pm.mutex);
 		if (adev->pm.dpm_enabled)
 			amdgpu_pm_compute_clocks(adev);
@@ -6644,6 +6725,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
 	.set_sclk_od = ci_dpm_set_sclk_od,
 	.get_mclk_od = ci_dpm_get_mclk_od,
 	.set_mclk_od = ci_dpm_set_mclk_od,
+	.check_state_equal = ci_check_state_equal,
+	.get_vce_clock_state = amdgpu_get_vce_clock_state,
 };
 
 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6662,3 +6745,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
 	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &ci_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a845b6a..302df85 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 	return r;
 }
 
-static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
-	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
-	if (hung)
-		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-	else
-		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
-	WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
 /**
  * cik_asic_reset - soft reset GPU
  *
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
 static int cik_asic_reset(struct amdgpu_device *adev)
 {
 	int r;
-	cik_set_bios_scratch_engine_hung(adev, true);
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	r = cik_gpu_pci_config_reset(adev);
 
-	cik_set_bios_scratch_engine_hung(adev, false);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return r;
 }
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
 		adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
 }
 
-static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 5,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 5,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-int cik_set_ip_blocks(struct amdgpu_device *adev)
-{
-	if (adev->enable_virtual_display) {
-		switch (adev->asic_type) {
-		case CHIP_BONAIRE:
-			adev->ip_blocks = bonaire_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
-			break;
-		case CHIP_HAWAII:
-			adev->ip_blocks = hawaii_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
-			break;
-		case CHIP_KAVERI:
-			adev->ip_blocks = kaveri_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
-			break;
-		case CHIP_KABINI:
-			adev->ip_blocks = kabini_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
-			break;
-		case CHIP_MULLINS:
-			adev->ip_blocks = mullins_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
-			break;
-		default:
-			/* FIXME: not supported yet */
-			return -EINVAL;
-		}
-	} else {
-		switch (adev->asic_type) {
-		case CHIP_BONAIRE:
-			adev->ip_blocks = bonaire_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
-			break;
-		case CHIP_HAWAII:
-			adev->ip_blocks = hawaii_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
-			break;
-		case CHIP_KAVERI:
-			adev->ip_blocks = kaveri_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
-			break;
-		case CHIP_KABINI:
-			adev->ip_blocks = kabini_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
-			break;
-		case CHIP_MULLINS:
-			adev->ip_blocks = mullins_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
-			break;
-		default:
-			/* FIXME: not supported yet */
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs cik_common_ip_funcs = {
+static const struct amd_ip_funcs cik_common_ip_funcs = {
 	.name = "cik_common",
 	.early_init = cik_common_early_init,
 	.late_init = NULL,
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
 	.set_clockgating_state = cik_common_set_clockgating_state,
 	.set_powergating_state = cik_common_set_powergating_state,
 };
+
+static const struct amdgpu_ip_block_version cik_common_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_COMMON,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cik_common_ip_funcs,
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		amdgpu_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+		break;
+	case CHIP_HAWAII:
+		amdgpu_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
+		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+		break;
+	case CHIP_KAVERI:
+		amdgpu_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
+		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		amdgpu_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+	return 0;
+}
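
The per-ASIC block arrays above are replaced by runtime registration: cik_set_ip_blocks() now appends versioned blocks in hardware init order, and only the DCE entry varies with enable_virtual_display. A minimal sketch of what the add helper amounts to, assuming adev->ip_blocks[] is a fixed-size array of slots carrying a version pointer and num_ip_blocks counts the used slots (illustrative, not quoted from this series):

	/* Sketch: append one versioned IP block; callers register blocks
	 * in hardware init order, as cik_set_ip_blocks() does above. */
	int amdgpu_ip_block_add(struct amdgpu_device *adev,
				const struct amdgpu_ip_block_version *ip_block_version)
	{
		if (!ip_block_version)
			return -EINVAL;

		adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

		return 0;
	}
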
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 5ebd2d7..c4989f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,8 +24,6 @@
 #ifndef __CIK_H__
 #define __CIK_H__
 
-extern const struct amd_ip_funcs cik_common_ip_funcs;
-
 void cik_srbm_select(struct amdgpu_device *adev,
 		     u32 me, u32 pipe, u32 queue, u32 vmid);
 int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index be3d6f7..319b32c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs cik_ih_ip_funcs = {
+static const struct amd_ip_funcs cik_ih_ip_funcs = {
 	.name = "cik_ih",
 	.early_init = cik_ih_early_init,
 	.late_init = NULL,
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 	if (adev->irq.ih_funcs == NULL)
 		adev->irq.ih_funcs = &cik_ih_funcs;
 }
+
+const struct amdgpu_ip_block_version cik_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 2,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cik_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
index 6b0f375..1d9ddee 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
@@ -24,6 +24,6 @@
 #ifndef __CIK_IH_H__
 #define __CIK_IH_H__
 
-extern const struct amd_ip_funcs cik_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_ih_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952ac..4c34dbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 					  SDMA_NOP_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	unsigned index;
 	u32 tmp = 0;
 	u64 gpu_addr;
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err1;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* cik_sdma_ring_emit_hdp_flush */
-		3 + /* cik_sdma_ring_emit_hdp_invalidate */
-		6 + /* cik_sdma_ring_emit_pipeline_sync */
-		12 + /* cik_sdma_ring_emit_vm_flush */
-		9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 				 bool enable)
 {
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
-				     AMDGPU_RING_TYPE_SDMA);
+				     AMDGPU_SDMA_IRQ_TRAP0 :
+				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
 	}
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs cik_sdma_ip_funcs = {
+static const struct amd_ip_funcs cik_sdma_ip_funcs = {
 	.name = "cik_sdma",
 	.early_init = cik_sdma_early_init,
 	.late_init = NULL,
@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
 	.get_rptr = cik_sdma_ring_get_rptr,
 	.get_wptr = cik_sdma_ring_get_wptr,
 	.set_wptr = cik_sdma_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		6 + /* cik_sdma_ring_emit_hdp_flush */
+		3 + /* cik_sdma_ring_emit_hdp_invalidate */
+		6 + /* cik_sdma_ring_emit_pipeline_sync */
+		12 + /* cik_sdma_ring_emit_vm_flush */
+		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
 	.emit_ib = cik_sdma_ring_emit_ib,
 	.emit_fence = cik_sdma_ring_emit_fence,
 	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.test_ib = cik_sdma_ring_test_ib,
 	.insert_nop = cik_sdma_ring_insert_nop,
 	.pad_ib = cik_sdma_ring_pad_ib,
-	.get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
-	.get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
+
+const struct amdgpu_ip_block_version cik_sdma_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 2,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cik_sdma_ip_funcs,
+};
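
Worth noting in the cik_sdma hunk above: the frame and IB sizes are compile-time constants, so the old get_emit_ib_size()/get_dma_frame_size() callbacks collapse into plain .emit_frame_size/.emit_ib_size fields on amdgpu_ring_funcs, next to type/align_mask/nop. A hedged sketch of how a submission path could size its ring reservation from those fields (the caller below is illustrative, not from this series):

	/* Sketch: worst-case ring space for one job, derived from the
	 * constant per-ring sizing fields instead of callbacks. */
	static unsigned ring_space_needed(const struct amdgpu_ring *ring,
					  unsigned num_ibs)
	{
		return ring->funcs->emit_frame_size +
		       num_ibs * ring->funcs->emit_ib_size;
	}
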
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
index 027727c..a4a8fe0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
@@ -24,6 +24,6 @@
 #ifndef __CIK_SDMA_H__
 #define __CIK_SDMA_H__
 
-extern const struct amd_ip_funcs cik_sdma_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_sdma_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 8659852..6cbd913 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -43,6 +43,14 @@
 #define CRTC4_REGISTER_OFFSET                 (0x477c - 0x1b7c)
 #define CRTC5_REGISTER_OFFSET                 (0x4a7c - 0x1b7c)
 
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET                 (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET                 (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET                 (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET                 (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET                 (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET                 (0x1816 - 0x1807)
+
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 3c082e14..352b5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev,
 
 	pi->current_ps = *ps;
 	pi->current_rps = *rps;
-	pi->current_rps.ps_priv = ps;
+	pi->current_rps.ps_priv = &pi->current_ps;
+	adev->pm.dpm.current_ps = &pi->current_rps;
 
 }
 
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev,
 
 	pi->requested_ps = *ps;
 	pi->requested_rps = *rps;
-	pi->requested_rps.ps_priv = ps;
+	pi->requested_rps.ps_priv = &pi->requested_ps;
+	adev->pm.dpm.requested_ps = &pi->requested_rps;
 
 }
 
@@ -2257,6 +2259,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 	}
 }
 
+static int cz_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	if (equal == NULL)
+		return -EINVAL;
+
+	*equal = false;
+	return 0;
+}
+
 const struct amd_ip_funcs cz_dpm_ip_funcs = {
 	.name = "cz_dpm",
 	.early_init = cz_dpm_early_init,
@@ -2289,6 +2303,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
 	.vblank_too_short = NULL,
 	.powergate_uvd = cz_dpm_powergate_uvd,
 	.powergate_vce = cz_dpm_powergate_vce,
+	.check_state_equal = cz_check_state_equal,
 };
 
 static void cz_dpm_set_funcs(struct amdgpu_device *adev)
@@ -2296,3 +2311,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev)
 	if (NULL == adev->pm.funcs)
 		adev->pm.funcs = &cz_dpm_funcs;
 }
+
+const struct amdgpu_ip_block_version cz_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cz_dpm_ip_funcs,
+};
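
cz_check_state_equal() unconditionally reports "not equal", so the DPM core always performs a full state transition for this ASIC. A sketch of the kind of caller such a callback serves, using the funcs table wired up above (dpm_do_full_switch() is a hypothetical stand-in for the core's reprogramming path):

	/* Sketch: let the hardware backend veto a redundant state switch. */
	static int dpm_maybe_switch_state(struct amdgpu_device *adev,
					  struct amdgpu_ps *cur,
					  struct amdgpu_ps *req)
	{
		bool equal = false;

		if (adev->pm.funcs->check_state_equal &&
		    !adev->pm.funcs->check_state_equal(adev, cur, req, &equal) &&
		    equal)
			return 0;	/* states match, nothing to reprogram */

		return dpm_do_full_switch(adev, req);	/* hypothetical helper */
	}
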
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 3d23a70..fe7cbb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs cz_ih_ip_funcs = {
+static const struct amd_ip_funcs cz_ih_ip_funcs = {
 	.name = "cz_ih",
 	.early_init = cz_ih_early_init,
 	.late_init = NULL,
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 		adev->irq.ih_funcs = &cz_ih_funcs;
 }
 
+const struct amdgpu_ip_block_version cz_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 3,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cz_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
index fc4057a..14be775 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
@@ -24,6 +24,6 @@
 #ifndef __CZ_IH_H__
 #define __CZ_IH_H__
 
-extern const struct amd_ip_funcs cz_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cz_ih_ip_block;
 
 #endif /* __CZ_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4108c68..199d3f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -31,6 +31,7 @@
 #include "atombios_encoders.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#include "dce_v10_0.h"
 
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
 			       enum amdgpu_hpd_id hpd)
 {
-	int idx;
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		idx = 0;
-		break;
-	case AMDGPU_HPD_2:
-		idx = 1;
-		break;
-	case AMDGPU_HPD_3:
-		idx = 2;
-		break;
-	case AMDGPU_HPD_4:
-		idx = 3;
-		break;
-	case AMDGPU_HPD_5:
-		idx = 4;
-		break;
-	case AMDGPU_HPD_6:
-		idx = 5;
-		break;
-	default:
+	if (hpd >= adev->mode_info.num_hpd)
 		return connected;
-	}
 
-	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
 	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
 		connected = true;
 
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
 {
 	u32 tmp;
 	bool connected = dce_v10_0_hpd_sense(adev, hpd);
-	int idx;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		idx = 0;
-		break;
-	case AMDGPU_HPD_2:
-		idx = 1;
-		break;
-	case AMDGPU_HPD_3:
-		idx = 2;
-		break;
-	case AMDGPU_HPD_4:
-		idx = 3;
-		break;
-	case AMDGPU_HPD_5:
-		idx = 4;
-		break;
-	case AMDGPU_HPD_6:
-		idx = 5;
-		break;
-	default:
+	if (hpd >= adev->mode_info.num_hpd)
 		return;
-	}
 
-	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
 	if (connected)
 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
 	else
 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
-	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
 	u32 tmp;
-	int idx;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			idx = 0;
-			break;
-		case AMDGPU_HPD_2:
-			idx = 1;
-			break;
-		case AMDGPU_HPD_3:
-			idx = 2;
-			break;
-		case AMDGPU_HPD_4:
-			idx = 3;
-			break;
-		case AMDGPU_HPD_5:
-			idx = 4;
-			break;
-		case AMDGPU_HPD_6:
-			idx = 5;
-			break;
-		default:
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
-		}
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
-			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
-		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
-		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
-		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
 				    DC_HPD_CONNECT_INT_DELAY,
 				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
 				    DC_HPD_DISCONNECT_INT_DELAY,
 				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
-		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq,
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
 	u32 tmp;
-	int idx;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			idx = 0;
-			break;
-		case AMDGPU_HPD_2:
-			idx = 1;
-			break;
-		case AMDGPU_HPD_3:
-			idx = 2;
-			break;
-		case AMDGPU_HPD_4:
-			idx = 3;
-			break;
-		case AMDGPU_HPD_5:
-			idx = 4;
-			break;
-		case AMDGPU_HPD_6:
-			idx = 5;
-			break;
-		default:
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
-		}
 
-		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
-		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		amdgpu_irq_put(adev, &adev->hpd_irq,
 			       amdgpu_connector->hpd.hpd);
@@ -3554,7 +3471,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
 	.name = "dce_v10_0",
 	.early_init = dce_v10_0_early_init,
 	.late_init = NULL,
@@ -3885,3 +3802,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v10_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 10,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v10_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v10_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 10,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &dce_v10_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index e3dc04d..7a07477 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -24,7 +24,9 @@
 #ifndef __DCE_V10_0_H__
 #define __DCE_V10_0_H__
 
-extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+
+extern const struct amdgpu_ip_block_version dce_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v10_1_ip_block;
 
 void dce_v10_0_disable_dce(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f264b8f..ecd000e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -31,6 +31,7 @@
 #include "atombios_encoders.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#include "dce_v11_0.h"
 
 #include "dce/dce_11_0_d.h"
 #include "dce/dce_11_0_sh_mask.h"
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
 			       enum amdgpu_hpd_id hpd)
 {
-	int idx;
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		idx = 0;
-		break;
-	case AMDGPU_HPD_2:
-		idx = 1;
-		break;
-	case AMDGPU_HPD_3:
-		idx = 2;
-		break;
-	case AMDGPU_HPD_4:
-		idx = 3;
-		break;
-	case AMDGPU_HPD_5:
-		idx = 4;
-		break;
-	case AMDGPU_HPD_6:
-		idx = 5;
-		break;
-	default:
+	if (hpd >= adev->mode_info.num_hpd)
 		return connected;
-	}
 
-	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
 	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
 		connected = true;
 
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
 {
 	u32 tmp;
 	bool connected = dce_v11_0_hpd_sense(adev, hpd);
-	int idx;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		idx = 0;
-		break;
-	case AMDGPU_HPD_2:
-		idx = 1;
-		break;
-	case AMDGPU_HPD_3:
-		idx = 2;
-		break;
-	case AMDGPU_HPD_4:
-		idx = 3;
-		break;
-	case AMDGPU_HPD_5:
-		idx = 4;
-		break;
-	case AMDGPU_HPD_6:
-		idx = 5;
-		break;
-	default:
+	if (hpd >= adev->mode_info.num_hpd)
 		return;
-	}
 
-	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
 	if (connected)
 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
 	else
 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
-	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
 	u32 tmp;
-	int idx;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			idx = 0;
-			break;
-		case AMDGPU_HPD_2:
-			idx = 1;
-			break;
-		case AMDGPU_HPD_3:
-			idx = 2;
-			break;
-		case AMDGPU_HPD_4:
-			idx = 3;
-			break;
-		case AMDGPU_HPD_5:
-			idx = 4;
-			break;
-		case AMDGPU_HPD_6:
-			idx = 5;
-			break;
-		default:
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
-		}
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
-			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
-		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
-		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
-		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
 				    DC_HPD_CONNECT_INT_DELAY,
 				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
 				    DC_HPD_DISCONNECT_INT_DELAY,
 				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
-		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
 	u32 tmp;
-	int idx;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			idx = 0;
-			break;
-		case AMDGPU_HPD_2:
-			idx = 1;
-			break;
-		case AMDGPU_HPD_3:
-			idx = 2;
-			break;
-		case AMDGPU_HPD_4:
-			idx = 3;
-			break;
-		case AMDGPU_HPD_5:
-			idx = 4;
-			break;
-		case AMDGPU_HPD_6:
-			idx = 5;
-			break;
-		default:
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
-		}
 
-		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
-		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
@@ -3611,7 +3528,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
 	.name = "dce_v11_0",
 	.early_init = dce_v11_0_early_init,
 	.late_init = NULL,
@@ -3941,3 +3858,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v11_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 11,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v11_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v11_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 11,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &dce_v11_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 1f58a65..0d878ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -24,7 +24,8 @@
 #ifndef __DCE_V11_0_H__
 #define __DCE_V11_0_H__
 
-extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
 
 void dce_v11_0_disable_dce(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b948d6c..44547f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -46,6 +46,16 @@ static const u32 crtc_offsets[6] =
 	SI_CRTC5_REGISTER_OFFSET
 };
 
+static const u32 hpd_offsets[] =
+{
+	DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS,
+};
+
 static const uint32_t dig_offsets[] = {
 	SI_CRTC0_REGISTER_OFFSET,
 	SI_CRTC1_REGISTER_OFFSET,
@@ -94,15 +104,6 @@ static const struct {
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 } };
 
-static const uint32_t hpd_int_control_offsets[6] = {
-	DC_HPD1_INT_CONTROL,
-	DC_HPD2_INT_CONTROL,
-	DC_HPD3_INT_CONTROL,
-	DC_HPD4_INT_CONTROL,
-	DC_HPD5_INT_CONTROL,
-	DC_HPD6_INT_CONTROL,
-};
-
 static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
 				     u32 block_offset, u32 reg)
 {
@@ -257,34 +258,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
 {
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_2:
-		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_3:
-		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_4:
-		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_5:
-		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_6:
-		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return connected;
+
+	if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE)
+		connected = true;
 
 	return connected;
 }
@@ -303,58 +281,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
 	u32 tmp;
 	bool connected = dce_v6_0_hpd_sense(adev, hpd);
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		tmp = RREG32(DC_HPD1_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD1_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_2:
-		tmp = RREG32(DC_HPD2_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD2_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_3:
-		tmp = RREG32(DC_HPD3_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD3_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_4:
-		tmp = RREG32(DC_HPD4_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD4_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_5:
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD5_INT_CONTROL, tmp);
-			break;
-	case AMDGPU_HPD_6:
-		tmp = RREG32(DC_HPD6_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD6_INT_CONTROL, tmp);
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return;
+
+	tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+	if (connected)
+		tmp &= ~DC_HPDx_INT_POLARITY;
+	else
+		tmp |= DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -369,34 +304,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
-		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(DC_HPD1_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(DC_HPD2_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(DC_HPD3_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(DC_HPD4_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(DC_HPD5_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(DC_HPD6_CONTROL, tmp);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp |= DC_HPDx_EN;
+		WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -405,34 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
-			switch (amdgpu_connector->hpd.hpd) {
-			case AMDGPU_HPD_1:
-				dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_2:
-				dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_3:
-				dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_4:
-				dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_5:
-				dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_6:
-				dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
-				break;
-			default:
-				continue;
-			}
-
-			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-			dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
-			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+			tmp &= ~DC_HPDx_INT_EN;
+			WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
@@ -454,32 +347,18 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(DC_HPD1_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(DC_HPD2_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(DC_HPD3_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(DC_HPD4_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(DC_HPD5_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(DC_HPD6_CONTROL, 0);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp &= ~DC_HPDx_EN;
+		WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
 }
@@ -611,12 +490,55 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 					  bool render)
 {
-	if (!render) 
+	if (!render)
 		WREG32(R_000300_VGA_RENDER_CONTROL,
 			RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
 
 }
 
+static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
+{
+	int num_crtc = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+		num_crtc = 6;
+		break;
+	case CHIP_OLAND:
+		num_crtc = 2;
+		break;
+	default:
+		num_crtc = 0;
+	}
+	return num_crtc;
+}
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev)
+{
+	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
+	if (amdgpu_atombios_has_dce_engine_info(adev)) {
+		u32 tmp;
+		int crtc_enabled, i;
+
+		dce_v6_0_set_vga_render_state(adev, false);
+
+		/* Disable CRTCs */
+		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
+			crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) &
+				EVERGREEN_CRTC_MASTER_EN;
+			if (crtc_enabled) {
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+		}
+	}
+}
+
 static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 {
 
@@ -2338,21 +2260,20 @@ static int dce_v6_0_early_init(void *handle)
 	dce_v6_0_set_display_funcs(adev);
 	dce_v6_0_set_irq_funcs(adev);
 
+	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
-		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
 		break;
 	case CHIP_OLAND:
-		adev->mode_info.num_crtc = 2;
 		adev->mode_info.num_hpd = 2;
 		adev->mode_info.num_dig = 2;
 		break;
 	default:
-		/* FIXME: not supported yet */
 		return -EINVAL;
 	}
 
@@ -2588,42 +2509,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
 					    unsigned type,
 					    enum amdgpu_interrupt_state state)
 {
-	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+	u32 dc_hpd_int_cntl;
 
-	switch (type) {
-	case AMDGPU_HPD_1:
-		dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_2:
-		dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_3:
-		dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_4:
-		dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_5:
-		dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_6:
-		dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
-		break;
-	default:
+	if (type >= adev->mode_info.num_hpd) {
 		DRM_DEBUG("invalid hpd %d\n", type);
 		return 0;
 	}
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-		dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-		dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
+		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	default:
 		break;
@@ -2796,7 +2698,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 			    struct amdgpu_irq_src *source,
 			    struct amdgpu_iv_entry *entry)
 {
-	uint32_t disp_int, mask, int_control, tmp;
+	uint32_t disp_int, mask, tmp;
 	unsigned hpd;
 
 	if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -2807,12 +2709,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 	hpd = entry->src_data;
 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
 	mask = interrupt_status_offsets[hpd].hpd;
-	int_control = hpd_int_control_offsets[hpd];
 
 	if (disp_int & mask) {
-		tmp = RREG32(int_control);
+		tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-		WREG32(int_control, tmp);
+		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 		schedule_work(&adev->hotplug_work);
 		DRM_INFO("IH: HPD%d\n", hpd + 1);
 	}
@@ -2833,7 +2734,7 @@ static int dce_v6_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
 	.name = "dce_v6_0",
 	.early_init = dce_v6_0_early_init,
 	.late_init = NULL,
@@ -3174,3 +3075,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v6_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v6_4_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 6,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &dce_v6_0_ip_funcs,
+};
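
The recurring change across the DCE files is mechanical: the six-way switch on AMDGPU_HPD_n becomes an index into hpd_offsets[], guarded by a num_hpd bounds check, since each HPD instance's registers sit at a fixed delta from instance 0. A condensed sketch of the pattern with a hypothetical hpd_rmw() helper (the real code open-codes the RREG32/WREG32 pairs, as above):

	/* Sketch: read-modify-write one per-instance HPD register via the
	 * offset table; the bounds check replaces the old "default:" arm. */
	static void hpd_rmw(struct amdgpu_device *adev, unsigned hpd,
			    u32 reg, u32 clr, u32 set)
	{
		u32 tmp;

		if (hpd >= adev->mode_info.num_hpd)
			return;

		tmp = RREG32(reg + hpd_offsets[hpd]);
		tmp = (tmp & ~clr) | set;
		WREG32(reg + hpd_offsets[hpd], tmp);
	}
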
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index 6a55281..7b546b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -24,6 +24,9 @@
 #ifndef __DCE_V6_0_H__
 #define __DCE_V6_0_H__
 
-extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v6_4_ip_block;
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5966166..979aedf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -31,6 +31,7 @@
 #include "atombios_encoders.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#include "dce_v8_0.h"
 
 #include "dce/dce_8_0_d.h"
 #include "dce/dce_8_0_sh_mask.h"
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] =
 	CRTC5_REGISTER_OFFSET
 };
 
+static const u32 hpd_offsets[] =
+{
+	HPD0_REGISTER_OFFSET,
+	HPD1_REGISTER_OFFSET,
+	HPD2_REGISTER_OFFSET,
+	HPD3_REGISTER_OFFSET,
+	HPD4_REGISTER_OFFSET,
+	HPD5_REGISTER_OFFSET
+};
+
 static const uint32_t dig_offsets[] = {
 	CRTC0_REGISTER_OFFSET,
 	CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 } };
 
-static const uint32_t hpd_int_control_offsets[6] = {
-	mmDC_HPD1_INT_CONTROL,
-	mmDC_HPD2_INT_CONTROL,
-	mmDC_HPD3_INT_CONTROL,
-	mmDC_HPD4_INT_CONTROL,
-	mmDC_HPD5_INT_CONTROL,
-	mmDC_HPD6_INT_CONTROL,
-};
-
 static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 				     u32 block_offset, u32 reg)
 {
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 {
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_2:
-		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_3:
-		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_4:
-		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_5:
-		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_6:
-		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
-			connected = true;
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return connected;
+
+	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+		connected = true;
 
 	return connected;
 }
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 	u32 tmp;
 	bool connected = dce_v8_0_hpd_sense(adev, hpd);
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_2:
-		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_3:
-		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_4:
-		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_5:
-		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
-			break;
-	case AMDGPU_HPD_6:
-		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return;
+
+	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+	if (connected)
+		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	else
+		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
-		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
-		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(mmDC_HPD1_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(mmDC_HPD2_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(mmDC_HPD3_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(mmDC_HPD4_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(mmDC_HPD5_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(mmDC_HPD6_CONTROL, tmp);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
-			switch (amdgpu_connector->hpd.hpd) {
-			case AMDGPU_HPD_1:
-				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_2:
-				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_3:
-				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_4:
-				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_5:
-				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_6:
-				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
-				break;
-			default:
-				continue;
-			}
-
-			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(mmDC_HPD1_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(mmDC_HPD2_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(mmDC_HPD3_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(mmDC_HPD4_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(mmDC_HPD5_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(mmDC_HPD6_CONTROL, 0);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
 }
@@ -3204,42 +3084,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
 					    unsigned type,
 					    enum amdgpu_interrupt_state state)
 {
-	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+	u32 dc_hpd_int_cntl;
 
-	switch (type) {
-	case AMDGPU_HPD_1:
-		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_2:
-		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_3:
-		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_4:
-		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_5:
-		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_6:
-		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
-		break;
-	default:
+	if (type >= adev->mode_info.num_hpd) {
 		DRM_DEBUG("invalid hpd %d\n", type);
 		return 0;
 	}
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	default:
 		break;
@@ -3412,7 +3273,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 			    struct amdgpu_irq_src *source,
 			    struct amdgpu_iv_entry *entry)
 {
-	uint32_t disp_int, mask, int_control, tmp;
+	uint32_t disp_int, mask, tmp;
 	unsigned hpd;
 
 	if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3423,12 +3284,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 	hpd = entry->src_data;
 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
 	mask = interrupt_status_offsets[hpd].hpd;
-	int_control = hpd_int_control_offsets[hpd];
 
 	if (disp_int & mask) {
-		tmp = RREG32(int_control);
+		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-		WREG32(int_control, tmp);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 		schedule_work(&adev->hotplug_work);
 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
 	}
@@ -3449,7 +3309,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
 	.name = "dce_v8_0",
 	.early_init = dce_v8_0_early_init,
 	.late_init = NULL,
@@ -3779,3 +3639,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v8_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_3_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 3,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_5_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 5,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
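
All five dce_v8_x blocks share one amd_ip_funcs table; only the type/major/minor/rev triple differs, which per-ASIC code can key on instead of adev->asic_type. A sketch of that lookup, assuming the slot layout from the earlier sketch (find_ip_block() is a hypothetical helper, not from this series):

	/* Sketch: locate a registered IP block by type, then branch on its
	 * major/minor revision rather than on the ASIC enum. */
	static const struct amdgpu_ip_block_version *
	find_ip_block(struct amdgpu_device *adev, enum amd_ip_block_type type)
	{
		int i;

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type)
				return adev->ip_blocks[i].version;
		return NULL;
	}
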
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7d0770c..13b802d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -24,7 +24,11 @@
 #ifndef __DCE_V8_0_H__
 #define __DCE_V8_0_H__
 
-extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_2_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_3_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_5_ip_block;
 
 void dce_v8_0_disable_dce(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f0..cc85676 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -27,6 +27,9 @@
 #include "atom.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "dce_v8_0.h"
 #endif
@@ -34,11 +37,13 @@
 #include "dce_v11_0.h"
 #include "dce_virtual.h"
 
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+
 static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
-				  struct amdgpu_irq_src *source,
-				  struct amdgpu_iv_entry *entry);
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+					      int index);
 
 /**
  * dce_virtual_vblank_wait - vblank wait asic callback.
@@ -99,6 +104,14 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 			      struct amdgpu_mode_mc_save *save)
 {
 	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+		dce_v6_0_disable_dce(adev);
+		break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
@@ -119,6 +132,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 		dce_v11_0_disable_dce(adev);
 		break;
 	case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_HAINAN:
+#endif
 		/* no DCE */
 		return;
 	default:
@@ -195,10 +211,9 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		amdgpu_crtc->enabled = true;
-		/* Make sure VBLANK and PFLIP interrupts are still enabled */
+		/* Make sure VBLANK interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
-		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
@@ -264,24 +279,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
 				     const struct drm_display_mode *mode,
 				     struct drm_display_mode *adjusted_mode)
 {
-	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	struct drm_encoder *encoder;
-
-	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc == crtc) {
-			amdgpu_crtc->encoder = encoder;
-			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
-			break;
-		}
-	}
-	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
-		amdgpu_crtc->encoder = NULL;
-		amdgpu_crtc->connector = NULL;
-		return false;
-	}
-
 	return true;
 }
 
@@ -341,6 +338,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
 	amdgpu_crtc->encoder = NULL;
 	amdgpu_crtc->connector = NULL;
+	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
 
 	return 0;
@@ -350,48 +348,128 @@ static int dce_virtual_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
 	dce_virtual_set_display_funcs(adev);
 	dce_virtual_set_irq_funcs(adev);
 
-	adev->mode_info.num_crtc = 1;
 	adev->mode_info.num_hpd = 1;
 	adev->mode_info.num_dig = 1;
 	return 0;
 }
 
-static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+static struct drm_encoder *
+dce_virtual_encoder(struct drm_connector *connector)
 {
-	struct amdgpu_i2c_bus_rec ddc_bus;
-	struct amdgpu_router router;
-	struct amdgpu_hpd hpd;
+	int enc_id = connector->encoder_ids[0];
+	struct drm_encoder *encoder;
+	int i;
 
-	/* look up gpio for ddc, hpd */
-	ddc_bus.valid = false;
-	hpd.hpd = AMDGPU_HPD_NONE;
-	/* needed for aux chan transactions */
-	ddc_bus.hpd = hpd.hpd;
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
 
-	memset(&router, 0, sizeof(router));
-	router.ddc_valid = false;
-	router.cd_valid = false;
-	amdgpu_display_add_connector(adev,
-				      0,
-				      ATOM_DEVICE_CRT1_SUPPORT,
-				      DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
-				      CONNECTOR_OBJECT_ID_VIRTUAL,
-				      &hpd,
-				      &router);
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
 
-	amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
-							ATOM_DEVICE_CRT1_SUPPORT,
-							0);
+		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+			return encoder;
+	}
 
-	amdgpu_link_encoder_connector(adev->ddev);
-
-	return true;
+	/* pick the first one */
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
+	return NULL;
 }
 
+static int dce_virtual_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode = NULL;
+	unsigned i;
+	static const struct mode_size {
+		int w;
+		int h;
+	} common_modes[17] = {
+		{ 640,  480},
+		{ 720,  480},
+		{ 800,  600},
+		{ 848,  480},
+		{1024,  768},
+		{1152,  768},
+		{1280,  720},
+		{1280,  800},
+		{1280,  854},
+		{1280,  960},
+		{1280, 1024},
+		{1440,  900},
+		{1400, 1050},
+		{1680, 1050},
+		{1600, 1200},
+		{1920, 1080},
+		{1920, 1200}
+	};
+
+	for (i = 0; i < 17; i++) {
+		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+		drm_mode_probed_add(connector, mode);
+	}
+
+	return 0;
+}
+
+static int dce_virtual_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static int
+dce_virtual_dpms(struct drm_connector *connector, int mode)
+{
+	return 0;
+}
+
+static enum drm_connector_status
+dce_virtual_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static int
+dce_virtual_set_property(struct drm_connector *connector,
+			 struct drm_property *property,
+			 uint64_t val)
+{
+	return 0;
+}
+
+static void dce_virtual_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static void dce_virtual_force(struct drm_connector *connector)
+{
+	return;
+}
+
+static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
+	.get_modes = dce_virtual_get_modes,
+	.mode_valid = dce_virtual_mode_valid,
+	.best_encoder = dce_virtual_encoder,
+};
+
+static const struct drm_connector_funcs dce_virtual_connector_funcs = {
+	.dpms = dce_virtual_dpms,
+	.detect = dce_virtual_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = dce_virtual_set_property,
+	.destroy = dce_virtual_destroy,
+	.force = dce_virtual_force,
+};
+
 static int dce_virtual_sw_init(void *handle)
 {
 	int r, i;
@@ -420,16 +498,16 @@ static int dce_virtual_sw_init(void *handle)
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
 
-	/* allocate crtcs */
+	/* allocate crtcs, encoders, connectors */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		r = dce_virtual_crtc_init(adev, i);
 		if (r)
 			return r;
+		r = dce_virtual_connector_encoder_init(adev, i);
+		if (r)
+			return r;
 	}
 
-	dce_virtual_get_connector_info(adev);
-	amdgpu_print_display_setup(adev->ddev);
-
 	drm_kms_helper_poll_init(adev->ddev);
 
 	adev->mode_info.mode_config_initialized = true;
@@ -496,7 +574,7 @@ static int dce_virtual_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_virtual_ip_funcs = {
+static const struct amd_ip_funcs dce_virtual_ip_funcs = {
 	.name = "dce_virtual",
 	.early_init = dce_virtual_early_init,
 	.late_init = NULL,
@@ -526,8 +604,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
 
 static void
 dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
-		      struct drm_display_mode *mode,
-		      struct drm_display_mode *adjusted_mode)
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
 {
 	return;
 }
@@ -547,10 +625,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
 				    const struct drm_display_mode *mode,
 				    struct drm_display_mode *adjusted_mode)
 {
-
-	/* set the active encoder to connector routing */
-	amdgpu_encoder_set_active_device(encoder);
-
 	return true;
 }
 
@@ -576,45 +650,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
 	.destroy = dce_virtual_encoder_destroy,
 };
 
-static void dce_virtual_encoder_add(struct amdgpu_device *adev,
-				 uint32_t encoder_enum,
-				 uint32_t supported_device,
-				 u16 caps)
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+					      int index)
 {
-	struct drm_device *dev = adev->ddev;
 	struct drm_encoder *encoder;
-	struct amdgpu_encoder *amdgpu_encoder;
+	struct drm_connector *connector;
 
-	/* see if we already added it */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		amdgpu_encoder = to_amdgpu_encoder(encoder);
-		if (amdgpu_encoder->encoder_enum == encoder_enum) {
-			amdgpu_encoder->devices |= supported_device;
-			return;
-		}
+	/* add a new encoder */
+	encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+	if (!encoder)
+		return -ENOMEM;
+	encoder->possible_crtcs = 1 << index;
+	drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL, NULL);
+	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
 
+	connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+	if (!connector) {
+		kfree(encoder);
+		return -ENOMEM;
 	}
 
-	/* add a new one */
-	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
-	if (!amdgpu_encoder)
-		return;
+	/* add a new connector */
+	drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	drm_connector_register(connector);
 
-	encoder = &amdgpu_encoder->base;
-	encoder->possible_crtcs = 0x1;
-	amdgpu_encoder->enc_priv = NULL;
-	amdgpu_encoder->encoder_enum = encoder_enum;
-	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-	amdgpu_encoder->devices = supported_device;
-	amdgpu_encoder->rmx_type = RMX_OFF;
-	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
-	amdgpu_encoder->is_ext_encoder = false;
-	amdgpu_encoder->caps = caps;
+	/* link them */
+	drm_mode_connector_attach_encoder(connector, encoder);
 
-	drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
-					 DRM_MODE_ENCODER_VIRTUAL, NULL);
-	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
-	DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+	return 0;
 }
 
 static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
@@ -630,8 +699,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
 	.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
 	.page_flip = &dce_virtual_page_flip,
 	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
-	.add_encoder = &dce_virtual_encoder_add,
-	.add_connector = &amdgpu_connector_add,
+	.add_encoder = NULL,
+	.add_connector = NULL,
 	.stop_mc_access = &dce_virtual_stop_mc_access,
 	.resume_mc_access = &dce_virtual_resume_mc_access,
 };
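
With .add_encoder and .add_connector set to NULL above, any common code that walks this table has to treat those hooks as optional. A small self-contained C sketch of that guard (the names are stand-ins for the amdgpu_display_funcs members):

	#include <stdio.h>

	struct display_funcs {
		void (*add_encoder)(int index);		/* optional */
		void (*add_connector)(int index);	/* optional */
	};

	static const struct display_funcs virt_funcs = {
		.add_encoder = NULL,
		.add_connector = NULL,
	};

	static void setup(const struct display_funcs *f, int index)
	{
		/* Call through the table only when a hook is provided. */
		if (f->add_encoder)
			f->add_encoder(index);
		if (f->add_connector)
			f->add_connector(index);
		else
			printf("index %d: IP block creates its own connector\n",
			       index);
	}

	int main(void)
	{
		setup(&virt_funcs, 0);
		return 0;
	}
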
@@ -642,107 +711,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
 		adev->mode_info.funcs = &dce_virtual_display_funcs;
 }
 
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
-{
-	struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
-	struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
-	unsigned crtc = 0;
-	drm_handle_vblank(adev->ddev, crtc);
-	dce_virtual_pageflip_irq(adev, NULL, NULL);
-	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
-	return HRTIMER_NORESTART;
-}
-
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
-						     int crtc,
-						     enum amdgpu_interrupt_state state)
-{
-	if (crtc >= adev->mode_info.num_crtc) {
-		DRM_DEBUG("invalid crtc %d\n", crtc);
-		return;
-	}
-
-	if (state && !adev->mode_info.vsync_timer_enabled) {
-		DRM_DEBUG("Enable software vsync timer\n");
-		hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
-		adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
-		hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
-	} else if (!state && adev->mode_info.vsync_timer_enabled) {
-		DRM_DEBUG("Disable software vsync timer\n");
-		hrtimer_cancel(&adev->mode_info.vblank_timer);
-	}
-
-	adev->mode_info.vsync_timer_enabled = state;
-	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
-}
-
-
-static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
-                                       struct amdgpu_irq_src *source,
-                                       unsigned type,
-                                       enum amdgpu_interrupt_state state)
-{
-	switch (type) {
-	case AMDGPU_CRTC_IRQ_VBLANK1:
-		dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
-
-static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
-					  int crtc)
-{
-	if (crtc >= adev->mode_info.num_crtc) {
-		DRM_DEBUG("invalid crtc %d\n", crtc);
-		return;
-	}
-}
-
-static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
-			      struct amdgpu_irq_src *source,
-			      struct amdgpu_iv_entry *entry)
-{
-	unsigned crtc = 0;
-	unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
-
-	dce_virtual_crtc_vblank_int_ack(adev, crtc);
-
-	if (amdgpu_irq_enabled(adev, source, irq_type)) {
-		drm_handle_vblank(adev->ddev, crtc);
-	}
-	dce_virtual_pageflip_irq(adev, NULL, NULL);
-	DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-	return 0;
-}
-
-static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
-					    struct amdgpu_irq_src *src,
-					    unsigned type,
-					    enum amdgpu_interrupt_state state)
-{
-	if (type >= adev->mode_info.num_crtc) {
-		DRM_ERROR("invalid pageflip crtc %d\n", type);
-		return -EINVAL;
-	}
-	DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
-
-	return 0;
-}
-
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
-				  struct amdgpu_irq_src *source,
-				  struct amdgpu_iv_entry *entry)
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+				unsigned crtc_id)
 {
 	unsigned long flags;
-	unsigned crtc_id = 0;
 	struct amdgpu_crtc *amdgpu_crtc;
 	struct amdgpu_flip_work *works;
 
-	crtc_id = 0;
 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
 	if (crtc_id >= adev->mode_info.num_crtc) {
@@ -781,22 +756,78 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+	struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
+						       struct amdgpu_crtc, vblank_timer);
+	struct drm_device *ddev = amdgpu_crtc->base.dev;
+	struct amdgpu_device *adev = ddev->dev_private;
+
+	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+		      HRTIMER_MODE_REL);
+
+	return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+							int crtc,
+							enum amdgpu_interrupt_state state)
+{
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+		DRM_DEBUG("Enable software vsync timer\n");
+		hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
+			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
+				    ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+		adev->mode_info.crtcs[crtc]->vblank_timer.function =
+			dce_virtual_vblank_timer_handle;
+		hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
+			      ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+	} else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+		DRM_DEBUG("Disable software vsync timer\n");
+		hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
+	}
+
+	adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
+	DRM_DEBUG("set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+					  struct amdgpu_irq_src *source,
+					  unsigned type,
+					  enum amdgpu_interrupt_state state)
+{
+	if (type > AMDGPU_CRTC_IRQ_VBLANK6)
+		return -EINVAL;
+
+	dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);
+
+	return 0;
+}
+
 static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
 	.set = dce_virtual_set_crtc_irq_state,
-	.process = dce_virtual_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
-	.set = dce_virtual_set_pageflip_irq_state,
-	.process = dce_virtual_pageflip_irq,
+	.process = NULL,
 };
 
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
 {
 	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
 	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
-
-	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
-	adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
 }
 
+const struct amdgpu_ip_block_version dce_virtual_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_virtual_ip_funcs,
+};
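
dce_virtual_vblank_timer_handle() above recovers the owning amdgpu_crtc from the embedded hrtimer with container_of(), which is what lets each crtc carry its own timer instead of the single per-device timer that the removed code used. A standalone sketch of the same recovery, with an offsetof-based container_of stand-in and toy types:

	#include <stdio.h>
	#include <stddef.h>

	/* Recover the outer struct from a pointer to one of its members. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct hrtimer { long expires_ns; };

	struct crtc {
		int id;
		struct hrtimer vblank_timer;	/* timer embedded per crtc */
	};

	static void timer_fn(struct hrtimer *t)
	{
		struct crtc *c = container_of(t, struct crtc, vblank_timer);

		printf("vblank on crtc %d\n", c->id);
	}

	int main(void)
	{
		struct crtc c = { .id = 0 };

		timer_fn(&c.vblank_timer);	/* handler sees only the member */
		return 0;
	}
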
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index e239243..ed42201 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -24,8 +24,7 @@
 #ifndef __DCE_VIRTUAL_H__
 #define __DCE_VIRTUAL_H__
 
-extern const struct amd_ip_funcs dce_virtual_ip_funcs;
-#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+extern const struct amdgpu_ip_block_version dce_virtual_ip_block;
 
 #endif
 
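The DCE_VIRTUAL_VBLANK_PERIOD value removed from this header (and moved into the .c file above) is simply one second in nanoseconds divided by a 60 Hz refresh rate, truncated. The arithmetic, as a trivial standalone check:

	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000L

	int main(void)
	{
		long refresh_hz = 60;
		long period_ns = NSEC_PER_SEC / refresh_hz;

		/* 1000000000 / 60 = 16666666 ns, the macro's value */
		printf("%ld Hz -> %ld ns per frame\n", refresh_hz, period_ns);
		return 0;
	}
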
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 40abb6b..21c086e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	long r;
@@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err2;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
@@ -1940,7 +1940,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
 
 static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 	uint32_t seq = ring->fence_drv.sync_seq;
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
@@ -1966,7 +1966,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
 	/* write new base address */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* gfx_v6_0_ring_emit_ib */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
-		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-		3; /* gfx_v6_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v6_0_ring_emit_vm_flush */
-		14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v6_0_select_se_sh,
@@ -2896,9 +2869,7 @@ static int gfx_v6_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     0x80000000, 0xf,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
-				     AMDGPU_RING_TYPE_GFX);
+				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
 	}
@@ -2920,9 +2891,7 @@ static int gfx_v6_0_sw_init(void *handle)
 		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     0x80000000, 0xf,
-				     &adev->gfx.eop_irq, irq_type,
-				     AMDGPU_RING_TYPE_COMPUTE);
+				     &adev->gfx.eop_irq, irq_type);
 		if (r)
 			return r;
 	}
@@ -3237,7 +3206,7 @@ static int gfx_v6_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
 	.name = "gfx_v6_0",
 	.early_init = gfx_v6_0_early_init,
 	.late_init = NULL,
@@ -3255,10 +3224,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = 0x80000000,
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+		3, /* gfx_v6_ring_emit_cntxcntl */
+	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3269,15 +3248,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = 0x80000000,
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v6_0_ring_emit_vm_flush */
+		14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3287,8 +3273,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
 	.test_ring = gfx_v6_0_ring_test_ring,
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
-	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -3360,3 +3344,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
+
+const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gfx_v6_0_ip_funcs,
+};
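
The fence_* to dma_fence_* conversions above keep the wait convention: dma_fence_wait_timeout() returns a negative error code, zero on timeout, or the remaining time on success, and the IB tests map zero to -ETIMEDOUT. A standalone sketch of that tri-state handling (fake_wait_timeout is a made-up stand-in, not a kernel API):

	#include <stdio.h>
	#include <errno.h>

	/* Stand-in with dma_fence_wait_timeout()-style returns:
	 * <0 on error, 0 on timeout, remaining time on success. */
	static long fake_wait_timeout(int signaled, long timeout)
	{
		return signaled ? timeout / 2 : 0;
	}

	static int run_test(int signaled)
	{
		long r = fake_wait_timeout(signaled, 100);

		if (r == 0)
			return -ETIMEDOUT;	/* as in the IB tests above */
		else if (r < 0)
			return (int)r;		/* the wait itself failed */
		return 0;			/* signaled in time */
	}

	int main(void)
	{
		printf("signaled -> %d, idle -> %d\n",
		       run_test(1), run_test(0));
		return 0;
	}
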
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index b9657e7..ced6fc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -24,6 +24,6 @@
 #ifndef __GFX_V6_0_H__
 #define __GFX_V6_0_H__
 
-extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block;
 
 #endif
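
The ring-funcs hunks above replace the get_dma_frame_size()/get_emit_ib_size() callbacks with constant .emit_frame_size/.emit_ib_size fields: the worst-case dword budget becomes a compile-time sum, so no indirect call is needed at submission time. A standalone sketch of the same accounting, with the counts copied from the gfx ring above (the names are illustrative):

	#include <stdio.h>

	enum {
		HDP_FLUSH_DW  = 5,
		HDP_INVAL_DW  = 5,
		FENCE_DW      = 14 * 3,		/* x3: user fence, vm fence */
		PIPE_SYNC_DW  = 7 + 4,
		VM_FLUSH_DW   = 17 + 6,
		CNTXCNTL_DW   = 3,
		FRAME_SIZE_DW = HDP_FLUSH_DW + HDP_INVAL_DW + FENCE_DW +
				PIPE_SYNC_DW + VM_FLUSH_DW + CNTXCNTL_DW,
	};

	int main(void)
	{
		/* A submit path can reserve this much ring space up front
		 * instead of computing it through a per-ring callback. */
		printf("reserve %d dwords per frame\n", FRAME_SIZE_DW);
		return 0;
	}
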
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da..5b631fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask;
-	int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
 
-	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	long r;
@@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err2;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
  */
 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 	uint32_t seq = ring->fence_drv.sync_seq;
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.save_restore_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.save_restore_obj);
 			if (r) {
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.clear_state_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.clear_state_obj);
 			if (r) {
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.cp_table_obj == NULL) {
 			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.cp_table_obj);
 			if (r) {
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
 {
-	return
-		4; /* gfx_v7_0_ring_emit_ib_gfx */
+	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+	return RREG32(mmSQ_IND_DATA);
 }
 
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
 {
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-		3; /* gfx_v7_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v7_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v7_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	/* type 0 wave data */
+	dst[(*no_fields)++] = 0;
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
 }
 
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v7_0_select_se_sh,
+	.read_wave_data = &gfx_v7_0_read_wave_data,
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
-				     AMDGPU_RING_TYPE_GFX);
+				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
 	}
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle)
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, irq_type,
-				     AMDGPU_RING_TYPE_COMPUTE);
+				     &adev->gfx.eop_irq, irq_type);
 		if (r)
 			return r;
 	}
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 	.name = "gfx_v7_0",
 	.early_init = gfx_v7_0_early_init,
 	.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+		3, /* gfx_v7_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v7_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
+
+const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 3,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
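
wave_read_ind() above selects a wave's indirect register by packing the wave id (bits 3:0), the simd id (bits 5:4), one mode bit at 13 (its meaning is not decoded here), and the register address (from bit 16 up) into a single SQ_IND_INDEX write. A standalone pack/unpack sketch of that encoding:

	#include <stdio.h>
	#include <stdint.h>

	/* Same field layout as the wave_read_ind() packing above. */
	static uint32_t pack_sq_ind(uint32_t simd, uint32_t wave,
				    uint32_t address)
	{
		return (wave & 0xF) | ((simd & 0x3) << 4) |
		       (1u << 13) | (address << 16);
	}

	int main(void)
	{
		uint32_t v = pack_sq_ind(2, 7, 0x12);

		printf("index=0x%08x wave=%u simd=%u addr=0x%x\n",
		       v, v & 0xF, (v >> 4) & 0x3, v >> 16);
		return 0;
	}
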
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index 94e3ea1..2f5164c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -24,6 +24,9 @@
 #ifndef __GFX_V7_0_H__
 #define __GFX_V7_0_H__
 
-extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee6a48a..86a7ca5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -798,7 +798,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	long r;
@@ -824,7 +824,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err2;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
@@ -844,7 +844,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
@@ -1058,6 +1058,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 		adev->firmware.fw_size +=
 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
+		/* we need to account for the JT (jump table) as well */
+		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+		if (amdgpu_sriov_vf(adev)) {
+			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+			info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+			info->fw = adev->gfx.mec_fw;
+			adev->firmware.fw_size +=
+				ALIGN(64 * PAGE_SIZE, PAGE_SIZE);
+		}
+
 		if (adev->gfx.mec2_fw) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
 			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
@@ -1127,34 +1140,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
 	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
 			PACKET3_SET_CONTEXT_REG_START);
-	switch (adev->asic_type) {
-	case CHIP_TONGA:
-	case CHIP_POLARIS10:
-		buffer[count++] = cpu_to_le32(0x16000012);
-		buffer[count++] = cpu_to_le32(0x0000002A);
-		break;
-	case CHIP_POLARIS11:
-		buffer[count++] = cpu_to_le32(0x16000012);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	case CHIP_FIJI:
-		buffer[count++] = cpu_to_le32(0x3a00161a);
-		buffer[count++] = cpu_to_le32(0x0000002e);
-		break;
-	case CHIP_TOPAZ:
-	case CHIP_CARRIZO:
-		buffer[count++] = cpu_to_le32(0x00000002);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	case CHIP_STONEY:
-		buffer[count++] = cpu_to_le32(0x00000000);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	default:
-		buffer[count++] = cpu_to_le32(0x00000000);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	}
+	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
 
 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -1273,7 +1260,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.clear_state_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.clear_state_obj);
 			if (r) {
@@ -1315,7 +1303,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.cp_table_obj == NULL) {
 			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.cp_table_obj);
 			if (r) {
@@ -1575,7 +1564,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	int r, i;
 	u32 tmp;
 	unsigned total_size, vgpr_offset, sgpr_offset;
@@ -1708,7 +1697,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	}
 
 	/* wait for the GPU to finish processing the IB */
-	r = fence_wait(f, false);
+	r = dma_fence_wait(f, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto fail;
@@ -1729,7 +1718,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
 fail:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 
 	return r;
 }
@@ -2045,10 +2034,8 @@ static int gfx_v8_0_sw_init(void *handle)
 			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
 		}
 
-		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
-				     AMDGPU_RING_TYPE_GFX);
+		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+				     AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
 	}
@@ -2072,10 +2059,8 @@ static int gfx_v8_0_sw_init(void *handle)
 		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
-		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, irq_type,
-				     AMDGPU_RING_TYPE_COMPUTE);
+		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+				     irq_type);
 		if (r)
 			return r;
 	}
@@ -3679,6 +3664,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 							num_rb_pipes);
 	}
 
+	/* cache the values for userspace */
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+			adev->gfx.config.rb_config[i][j].rb_backend_disable =
+				RREG32(mmCC_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].raster_config =
+				RREG32(mmPA_SC_RASTER_CONFIG);
+			adev->gfx.config.rb_config[i][j].raster_config_1 =
+				RREG32(mmPA_SC_RASTER_CONFIG_1);
+		}
+	}
+	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
@@ -4331,7 +4331,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	u32 tmp;
 	u32 rb_bufsz;
-	u64 rb_addr, rptr_addr;
+	u64 rb_addr, rptr_addr, wptr_gpu_addr;
 	int r;
 
 	/* Set the write pointer delay */
@@ -4362,6 +4362,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
 	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
 
+	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
+	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
 	mdelay(1);
 	WREG32(mmCP_RB0_CNTL, tmp);
 
@@ -5438,9 +5441,40 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+	return RREG32(mmSQ_IND_DATA);
+}
+
+static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+{
+	/* type 0 wave data */
+	dst[(*no_fields)++] = 0;
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
 static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v8_0_select_se_sh,
+	.read_wave_data = &gfx_v8_0_read_wave_data,
 };
 
 static int gfx_v8_0_early_init(void *handle)
@@ -6120,7 +6155,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask, reg_mem_engine;
 
-	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6222,7 +6257,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 
 static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 	uint32_t seq = ring->fence_drv.sync_seq;
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
@@ -6240,11 +6275,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-
-	/* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
-	if (usepfp)
-		amdgpu_ring_insert_nop(ring, 128);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6360,42 +6391,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v8_0_ring_emit_gds_switch */
-		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
-		6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
-		2 + /* gfx_v8_ring_emit_sb */
-		3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v8_0_ring_emit_gds_switch */
-		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v8_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
@@ -6541,7 +6536,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
-const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.name = "gfx_v8_0",
 	.early_init = gfx_v8_0_early_init,
 	.late_init = gfx_v8_0_late_init,
@@ -6562,10 +6557,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		6 + 6 + 6 + /* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+		2 + /* gfx_v8_ring_emit_sb */
+		3, /* gfx_v8_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6579,15 +6586,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_switch_buffer = gfx_v8_ring_emit_sb,
 	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v8_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6599,8 +6614,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6753,3 +6766,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
+
+const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gfx_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 8,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &gfx_v8_0_ip_funcs,
+};
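
The CP_RB_WPTR_POLL_ADDR_LO/HI writes added above turn a dword index into a byte offset within the writeback buffer and split the resulting 64-bit GPU address across a LO/HI register pair. A standalone sketch of that address math (the helpers mirror the kernel's lower_32_bits()/upper_32_bits(); the base address and offset are made up):

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
	static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

	int main(void)
	{
		uint64_t wb_gpu_addr = 0x123400000ull;	/* made-up base */
		unsigned int wptr_offs = 16;		/* dword index */

		/* dword index -> byte offset, then split across LO/HI */
		uint64_t addr = wb_gpu_addr + (uint64_t)wptr_offs * 4;

		printf("LO=0x%08x HI=0x%08x\n",
		       lower_32_bits(addr), upper_32_bits(addr));
		return 0;
	}
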
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index ebed1f8..788cc3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -24,6 +24,7 @@
 #ifndef __GFX_V8_0_H__
 #define __GFX_V8_0_H__
 
-extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b13c8aa..1940d36 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1030,7 +1030,7 @@ static int gmc_v6_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
 	.name = "gmc_v6_0",
 	.early_init = gmc_v6_0_early_init,
 	.late_init = gmc_v6_0_late_init,
@@ -1069,3 +1069,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
 }
 
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gmc_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index 42c4fc6..ed2f64d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -24,6 +24,6 @@
 #ifndef __GMC_V6_0_H__
 #define __GMC_V6_0_H__
 
-extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index aa0c4b9..3a25f72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1235,7 +1235,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.name = "gmc_v7_0",
 	.early_init = gmc_v7_0_early_init,
 	.late_init = gmc_v7_0_late_init,
@@ -1273,3 +1273,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->mc.vm_fault.num_types = 1;
 	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gmc_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 7,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &gmc_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 0b386b5..ebce296 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -24,6 +24,7 @@
 #ifndef __GMC_V7_0_H__
 #define __GMC_V7_0_H__
 
-extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c22ef14..74d7cc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1436,7 +1436,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.name = "gmc_v8_0",
 	.early_init = gmc_v8_0_early_init,
 	.late_init = gmc_v8_0_late_init,
@@ -1477,3 +1477,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->mc.vm_fault.num_types = 1;
 	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 8,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 8,
+	.minor = 5,
+	.rev = 0,
+	.funcs = &gmc_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index fc5001a..19b8a8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -24,6 +24,8 @@
 #ifndef __GMC_V8_0_H__
 #define __GMC_V8_0_H__
 
-extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 3b8906c..ac21bb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs iceland_ih_ip_funcs = {
+static const struct amd_ip_funcs iceland_ih_ip_funcs = {
 	.name = "iceland_ih",
 	.early_init = iceland_ih_early_init,
 	.late_init = NULL,
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 		adev->irq.ih_funcs = &iceland_ih_funcs;
 }
 
+const struct amdgpu_ip_block_version iceland_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 2,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &iceland_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
index 57558cd..3235f42 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
@@ -24,6 +24,6 @@
 #ifndef __ICELAND_IH_H__
 #define __ICELAND_IH_H__
 
-extern const struct amd_ip_funcs iceland_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version iceland_ih_ip_block;
 
 #endif /* __ICELAND_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f8618a3..b6f2e50 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
 	adev->pm.dpm.num_ps = state_array->ucNumEntries;
 
 	/* fill in the vce power states */
-	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 		u32 sclk;
 		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
 		clock_info = (union pplib_clock_info *)
@@ -3243,6 +3243,18 @@ static int kv_dpm_set_powergating_state(void *handle,
 	return 0;
 }
 
+static int kv_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	if (equal == NULL)
+		return -EINVAL;
+
+	*equal = false;
+	return 0;
+}
+
 const struct amd_ip_funcs kv_dpm_ip_funcs = {
 	.name = "kv_dpm",
 	.early_init = kv_dpm_early_init,
@@ -3273,6 +3285,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
 	.force_performance_level = &kv_dpm_force_performance_level,
 	.powergate_uvd = &kv_dpm_powergate_uvd,
 	.enable_bapm = &kv_dpm_enable_bapm,
+	.get_vce_clock_state = amdgpu_get_vce_clock_state,
+	.check_state_equal = kv_check_state_equal,
 };
 
 static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -3291,3 +3305,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
 	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version kv_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &kv_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 565dab3..e81aa46 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	unsigned index;
 	u32 tmp = 0;
 	u64 gpu_addr;
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err1;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* sdma_v2_4_ring_emit_hdp_flush */
-		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
-		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
-		12 + /* sdma_v2_4_ring_emit_vm_flush */
-		10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int sdma_v2_4_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle)
 		ring->use_doorbell = false;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
-				     AMDGPU_RING_TYPE_SDMA);
+				     AMDGPU_SDMA_IRQ_TRAP0 :
+				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
 	}
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
 	.name = "sdma_v2_4",
 	.early_init = sdma_v2_4_early_init,
 	.late_init = NULL,
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.get_rptr = sdma_v2_4_ring_get_rptr,
 	.get_wptr = sdma_v2_4_ring_get_wptr,
 	.set_wptr = sdma_v2_4_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		6 + /* sdma_v2_4_ring_emit_hdp_flush */
+		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+		12 + /* sdma_v2_4_ring_emit_vm_flush */
+		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
 	.emit_ib = sdma_v2_4_ring_emit_ib,
 	.emit_fence = sdma_v2_4_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.insert_nop = sdma_v2_4_ring_insert_nop,
 	.pad_ib = sdma_v2_4_ring_pad_ib,
-	.get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
-	.get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
+
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 2,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &sdma_v2_4_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
index 07349f5..28b4337 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
@@ -24,6 +24,6 @@
 #ifndef __SDMA_V2_4_H__
 #define __SDMA_V2_4_H__
 
-extern const struct amd_ip_funcs sdma_v2_4_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a9d1094..77f1465 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	unsigned index;
 	u32 tmp = 0;
 	u64 gpu_addr;
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err1;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* sdma_v3_0_ring_emit_hdp_flush */
-		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
-		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
-		12 + /* sdma_v3_0_ring_emit_vm_flush */
-		10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle)
 
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
-				     AMDGPU_RING_TYPE_SDMA);
+				     AMDGPU_SDMA_IRQ_TRAP0 :
+				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
 	}
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
 	.name = "sdma_v3_0",
 	.early_init = sdma_v3_0_early_init,
 	.late_init = NULL,
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.get_rptr = sdma_v3_0_ring_get_rptr,
 	.get_wptr = sdma_v3_0_ring_get_wptr,
 	.set_wptr = sdma_v3_0_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		6 + /* sdma_v3_0_ring_emit_hdp_flush */
+		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+		12 + /* sdma_v3_0_ring_emit_vm_flush */
+		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
 	.emit_ib = sdma_v3_0_ring_emit_ib,
 	.emit_fence = sdma_v3_0_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.insert_nop = sdma_v3_0_ring_insert_nop,
 	.pad_ib = sdma_v3_0_ring_pad_ib,
-	.get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
+
+const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 3,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &sdma_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 3,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &sdma_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
index 0cb9698..7aa223d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
@@ -24,6 +24,7 @@
 #ifndef __SDMA_V3_0_H__
 #define __SDMA_V3_0_H__
 
-extern const struct amd_ip_funcs sdma_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index dc9511c..3ed8ad8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -39,6 +39,7 @@
 #include "si_dma.h"
 #include "dce_v6_0.h"
 #include "si.h"
+#include "dce_virtual.h"
 
 static const u32 tahiti_golden_registers[] =
 {
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
-u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags;
 	u32 r;
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
 	return r;
 }
 
-void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
 	unsigned long flags;
 
@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs si_common_ip_funcs = {
+static const struct amd_ip_funcs si_common_ip_funcs = {
 	.name = "si_common",
 	.early_init = si_common_early_init,
 	.late_init = NULL,
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = {
 	.set_powergating_state = si_common_set_powergating_state,
 };
 
-static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+static const struct amdgpu_ip_block_version si_common_ip_block =
 {
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_dma_ip_funcs,
-	},
-/*	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &si_null_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_null_ip_funcs,
-	},
-	*/
-};
-
-
-static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
-{
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_dma_ip_funcs,
-	},
+	.type = AMD_IP_BLOCK_TYPE_COMMON,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_common_ip_funcs,
 };
 
 int si_set_ip_blocks(struct amdgpu_device *adev)
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 	case CHIP_VERDE:
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
+		amdgpu_ip_block_add(adev, &si_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_dma_ip_block);
+		/* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+		/* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+		break;
 	case CHIP_OLAND:
-		adev->ip_blocks = verde_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+		amdgpu_ip_block_add(adev, &si_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_dma_ip_block);
+		/* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+		/* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
 		break;
 	case CHIP_HAINAN:
-		adev->ip_blocks = hainan_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+		amdgpu_ip_block_add(adev, &si_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_dma_ip_block);
 		break;
 	default:
 		BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 959d7b6..5892250 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,8 +24,6 @@
 #ifndef __SI_H__
 #define __SI_H__
 
-extern const struct amd_ip_funcs si_common_ip_funcs;
-
 void si_srbm_select(struct amdgpu_device *adev,
 		     u32 me, u32 pipe, u32 queue, u32 vmid);
 int si_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index de35819..3dd552a 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	unsigned index;
 	u32 tmp = 0;
 	u64 gpu_addr;
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err1;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		3 + /* si_dma_ring_emit_hdp_flush */
-		3 + /* si_dma_ring_emit_hdp_invalidate */
-		6 + /* si_dma_ring_emit_pipeline_sync */
-		12 + /* si_dma_ring_emit_vm_flush */
-		9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int si_dma_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle)
 		ring->use_doorbell = false;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
-				     AMDGPU_RING_TYPE_SDMA);
+				     AMDGPU_SDMA_IRQ_TRAP0 :
+				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
 	}
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs si_dma_ip_funcs = {
+static const struct amd_ip_funcs si_dma_ip_funcs = {
 	.name = "si_dma",
 	.early_init = si_dma_early_init,
 	.late_init = NULL,
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
 	.get_rptr = si_dma_ring_get_rptr,
 	.get_wptr = si_dma_ring_get_wptr,
 	.set_wptr = si_dma_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		3 + /* si_dma_ring_emit_hdp_flush */
+		3 + /* si_dma_ring_emit_hdp_invalidate */
+		6 + /* si_dma_ring_emit_pipeline_sync */
+		12 + /* si_dma_ring_emit_vm_flush */
+		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
 	.emit_ib = si_dma_ring_emit_ib,
 	.emit_fence = si_dma_ring_emit_fence,
 	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
 	.test_ib = si_dma_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = si_dma_ring_pad_ib,
-	.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
-	.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
 };
 
 static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
 		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
+
+const struct amdgpu_ip_block_version si_dma_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_dma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
index 3a3e0c7..5ac1b84 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -24,6 +24,6 @@
 #ifndef __SI_DMA_H__
 #define __SI_DMA_H__
 
-extern const struct amd_ip_funcs si_dma_ip_funcs;
+extern const struct amdgpu_ip_block_version si_dma_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3de7bca..9172133 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
 	eg_pi->current_rps = *rps;
 	ni_pi->current_ps = *new_ps;
 	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+	adev->pm.dpm.current_ps = &eg_pi->current_rps;
 }
 
 static void ni_update_requested_ps(struct amdgpu_device *adev,
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev,
 	eg_pi->requested_rps = *rps;
 	ni_pi->requested_ps = *new_ps;
 	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+	adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
 }
 
 static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
@@ -7320,7 +7322,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
 	adev->pm.dpm.num_ps = state_array->ucNumEntries;
 
 	/* fill in the vce power states */
-	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 		u32 sclk, mclk;
 		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
 		clock_info = (union pplib_clock_info *)
@@ -7957,6 +7959,57 @@ static int si_dpm_early_init(void *handle)
 	return 0;
 }
 
+static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
+					     const struct rv7xx_pl *si_cpl2)
+{
+	return ((si_cpl1->mclk == si_cpl2->mclk) &&
+		(si_cpl1->sclk == si_cpl2->sclk) &&
+		(si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
+		(si_cpl1->vddc == si_cpl2->vddc) &&
+		(si_cpl1->vddci == si_cpl2->vddci));
+}
+
+static int si_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	struct si_ps *si_cps;
+	struct si_ps *si_rps;
+	int i;
+
+	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+		return -EINVAL;
+
+	si_cps = si_get_ps(cps);
+	si_rps = si_get_ps(rps);
+
+	if (si_cps == NULL) {
+		printk("si_cps is NULL\n");
+		*equal = false;
+		return 0;
+	}
+
+	if (si_cps->performance_level_count != si_rps->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < si_cps->performance_level_count; i++) {
+		if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
+					&(si_rps->performance_levels[i]))) {
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+	return 0;
+}
+
 
 const struct amd_ip_funcs si_dpm_ip_funcs = {
 	.name = "si_dpm",
@@ -7991,6 +8044,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
 	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
 	.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
 	.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+	.check_state_equal = &si_check_state_equal,
+	.get_vce_clock_state = amdgpu_get_vce_clock_state,
 };
 
 static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -8010,3 +8065,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
 }
 
+const struct amdgpu_ip_block_version si_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 8fae3d4..db0f368 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs si_ih_ip_funcs = {
+static const struct amd_ip_funcs si_ih_ip_funcs = {
 	.name = "si_ih",
 	.early_init = si_ih_early_init,
 	.late_init = NULL,
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 		adev->irq.ih_funcs = &si_ih_funcs;
 }
 
+const struct amdgpu_ip_block_version si_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
index f3e3a95..42e64a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -24,6 +24,6 @@
 #ifndef __SI_IH_H__
 #define __SI_IH_H__
 
-extern const struct amd_ip_funcs si_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version si_ih_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b4ea229..52b71ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs tonga_ih_ip_funcs = {
+static const struct amd_ip_funcs tonga_ih_ip_funcs = {
 	.name = "tonga_ih",
 	.early_init = tonga_ih_early_init,
 	.late_init = NULL,
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 		adev->irq.ih_funcs = &tonga_ih_funcs;
 }
 
+const struct amdgpu_ip_block_version tonga_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 3,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &tonga_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
index 7392d70..499027e 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
@@ -24,6 +24,6 @@
 #ifndef __TONGA_IH_H__
 #define __TONGA_IH_H__
 
-extern const struct amd_ip_funcs tonga_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version tonga_ih_ip_block;
 
-#endif /* __CZ_IH_H__ */
+#endif /* __TONGA_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f6c9415..8f9c7d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -36,6 +36,9 @@
 
 #include "bif/bif_4_1_d.h"
 
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
 static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -116,8 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -526,20 +528,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v4_2_ring_emit_hdp_flush */
-		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
-		14; /* uvd_v4_2_ring_emit_fence  x1 no user fence */
-}
-
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -698,18 +686,34 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void uvd_v4_2_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+	else
+		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
 static int uvd_v4_2_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	bool gate = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-		return 0;
-
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
 
+	uvd_v4_2_set_bypass_mode(adev, gate);
+
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+		return 0;
+
 	uvd_v4_2_enable_mgcg(adev, gate);
 
 	return 0;
@@ -738,7 +742,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 	}
 }
 
-const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
 	.name = "uvd_v4_2",
 	.early_init = uvd_v4_2_early_init,
 	.late_init = NULL,
@@ -756,10 +760,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v4_2_ring_get_rptr,
 	.get_wptr = uvd_v4_2_ring_get_wptr,
 	.set_wptr = uvd_v4_2_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v4_2_ring_emit_hdp_flush */
+		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+		14, /* uvd_v4_2_ring_emit_fence  x1 no user fence */
+	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
 	.emit_ib = uvd_v4_2_ring_emit_ib,
 	.emit_fence = uvd_v4_2_ring_emit_fence,
 	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +782,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -789,3 +799,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
 	adev->uvd.irq.num_types = 1;
 	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 4,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &uvd_v4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
index 0a615dd..8a0444b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
@@ -24,6 +24,6 @@
 #ifndef __UVD_V4_2_H__
 #define __UVD_V4_2_H__
 
-extern const struct amd_ip_funcs uvd_v4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 400c16f..95303e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -33,6 +33,8 @@
 #include "oss/oss_2_0_sh_mask.h"
 #include "bif/bif_5_0_d.h"
 #include "vi.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -112,8 +114,7 @@ static int uvd_v5_0_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -577,20 +578,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v5_0_ring_emit_hdp_flush */
-		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
-		14; /* uvd_v5_0_ring_emit_fence  x1 no user fence */
-}
-
 static bool uvd_v5_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -737,6 +724,20 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
 }
 #endif
 
+static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+	else
+		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
 static int uvd_v5_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
@@ -744,6 +745,8 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	static int curstate = -1;
 
+	uvd_v5_0_set_bypass_mode(adev, enable);
+
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
@@ -789,7 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
 	}
 }
 
-const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
 	.name = "uvd_v5_0",
 	.early_init = uvd_v5_0_early_init,
 	.late_init = NULL,
@@ -807,10 +810,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v5_0_ring_get_rptr,
 	.get_wptr = uvd_v5_0_ring_get_wptr,
 	.set_wptr = uvd_v5_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v5_0_ring_emit_hdp_flush */
+		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+		14, /* uvd_v5_0_ring_emit_fence  x1 no user fence */
+	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
 	.emit_ib = uvd_v5_0_ring_emit_ib,
 	.emit_fence = uvd_v5_0_ring_emit_fence,
 	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +832,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -840,3 +849,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->uvd.irq.num_types = 1;
 	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 5,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &uvd_v5_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
index e3b3c49..2eaaea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
@@ -24,6 +24,6 @@
 #ifndef __UVD_V5_0_H__
 #define __UVD_V5_0_H__
 
-extern const struct amd_ip_funcs uvd_v5_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d..a339b5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -725,31 +724,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0xE);
 }
 
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v6_0_ring_emit_hdp_flush */
-		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
-		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-		14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v6_0_ring_emit_hdp_flush */
-		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
-		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-		20 + /* uvd_v6_0_ring_emit_vm_flush */
-		14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
 static bool uvd_v6_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -961,7 +935,7 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
 }
 #endif
 
-static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
 {
 	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
 
@@ -979,15 +953,14 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-	if (adev->asic_type == CHIP_FIJI ||
-	    adev->asic_type == CHIP_POLARIS10)
-		uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
+	uvd_v6_0_set_bypass_mode(adev, enable);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
-	if (state == AMD_CG_STATE_GATE) {
+	if (enable) {
 		/* disable HW gating and enable Sw gating */
 		uvd_v6_0_set_sw_clock_gating(adev);
 	} else {
@@ -1027,7 +1000,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
 	}
 }
 
-const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 	.name = "uvd_v6_0",
 	.early_init = uvd_v6_0_early_init,
 	.late_init = NULL,
@@ -1048,10 +1021,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,15 +1044,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		20 + /* uvd_v6_0_ring_emit_vm_flush */
+		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1083,8 +1072,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1108,3 +1095,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->uvd.irq.num_types = 1;
 	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 6,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 6,
+	.minor = 3,
+	.rev = 0,
+	.funcs = &uvd_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
index 6b92a23..d3d48c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
@@ -24,6 +24,8 @@
 #ifndef __UVD_V6_0_H__
 #define __UVD_V6_0_H__
 
-extern const struct amd_ip_funcs uvd_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 76e64ad..38ed903 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle)
 	for (i = 0; i < adev->vce.num_rings; i++) {
 		ring = &adev->vce.ring[i];
 		sprintf(ring->name, "vce%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+		r = amdgpu_ring_init(adev, ring, 512,
+				     &adev->vce.irq, 0);
 		if (r)
 			return r;
 	}
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
 		return vce_v2_0_start(adev);
 }
 
-const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
 	.name = "vce_v2_0",
 	.early_init = vce_v2_0_early_init,
 	.late_init = NULL,
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v2_0_ring_get_rptr,
 	.get_wptr = vce_v2_0_ring_get_wptr,
 	.set_wptr = vce_v2_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs,
+	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence  x1 no user fence */
+	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
 	.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
-	.get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->vce.irq.num_types = 1;
 	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
 };
+
+const struct amdgpu_ip_block_version vce_v2_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 2,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &vce_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
index 0d2ae8a..4d15167 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
@@ -24,6 +24,6 @@
 #ifndef __VCE_V2_0_H__
 #define __VCE_V2_0_H__
 
-extern const struct amd_ip_funcs vce_v2_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v2_0_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8533269..5ed2930 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -389,8 +389,7 @@ static int vce_v3_0_sw_init(void *handle)
 	for (i = 0; i < adev->vce.num_rings; i++) {
 		ring = &adev->vce.ring[i];
 		sprintf(ring->name, "vce%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
 		if (r)
 			return r;
 	}
@@ -808,28 +807,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, seq);
 }
 
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		4 + /* vce_v3_0_emit_pipeline_sync */
-		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* vce_v3_0_emit_vm_flush */
-		4 + /* vce_v3_0_emit_pipeline_sync */
-		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
-const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 	.name = "vce_v3_0",
 	.early_init = vce_v3_0_early_init,
 	.late_init = NULL,
@@ -850,10 +828,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs,
+	.emit_frame_size =
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
 	.test_ring = amdgpu_vce_ring_test_ring,
@@ -862,15 +847,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
-	.parse_cs = NULL,
+	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
+	.emit_frame_size =
+		6 + /* vce_v3_0_emit_vm_flush */
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
 	.emit_ib = vce_v3_0_ring_emit_ib,
 	.emit_vm_flush = vce_v3_0_emit_vm_flush,
 	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -881,8 +872,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -910,3 +899,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->vce.irq.num_types = 1;
 	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
 };
+
+const struct amdgpu_ip_block_version vce_v3_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 3,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 3,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_4_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 3,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &vce_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
index b45af65..08b908c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
@@ -24,6 +24,8 @@
 #ifndef __VCE_V3_0_H__
 #define __VCE_V3_0_H__
 
-extern const struct amd_ip_funcs vce_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_1_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_4_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index c0d9aad..25c0a71 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -121,8 +121,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
 	u32 r;
 
 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	WREG32(mmSMC_IND_INDEX_0, (reg));
-	r = RREG32(mmSMC_IND_DATA_0);
+	WREG32(mmSMC_IND_INDEX_11, (reg));
+	r = RREG32(mmSMC_IND_DATA_11);
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 	return r;
 }
@@ -132,8 +132,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	unsigned long flags;
 
 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	WREG32(mmSMC_IND_INDEX_0, (reg));
-	WREG32(mmSMC_IND_DATA_0, (v));
+	WREG32(mmSMC_IND_INDEX_11, (reg));
+	WREG32(mmSMC_IND_DATA_11, (v));
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 }
 
@@ -437,12 +437,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
 	/* take the smc lock since we are using the smc index */
 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
 	/* set rom index to 0 */
-	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
-	WREG32(mmSMC_IND_DATA_0, 0);
+	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
+	WREG32(mmSMC_IND_DATA_11, 0);
 	/* set index to data for continuous read */
-	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
+	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
 	for (i = 0; i < length_dw; i++)
-		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
+		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 
 	return true;
@@ -556,21 +556,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =
 	{mmPA_SC_RASTER_CONFIG_1, false, true},
 };
 
-static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
-					 u32 sh_num, u32 reg_offset)
+static uint32_t vi_get_register_value(struct amdgpu_device *adev,
+				      bool indexed, u32 se_num,
+				      u32 sh_num, u32 reg_offset)
 {
-	uint32_t val;
+	if (indexed) {
+		uint32_t val;
+		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+		switch (reg_offset) {
+		case mmCC_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+		case mmGC_USER_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+		case mmPA_SC_RASTER_CONFIG:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+		case mmPA_SC_RASTER_CONFIG_1:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+		}
 
-	val = RREG32(reg_offset);
+		mutex_lock(&adev->grbm_idx_mutex);
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return val;
+		val = RREG32(reg_offset);
+
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		return val;
+	} else {
+		unsigned idx;
+
+		switch (reg_offset) {
+		case mmGB_ADDR_CONFIG:
+			return adev->gfx.config.gb_addr_config;
+		case mmMC_ARB_RAMCFG:
+			return adev->gfx.config.mc_arb_ramcfg;
+		case mmGB_TILE_MODE0:
+		case mmGB_TILE_MODE1:
+		case mmGB_TILE_MODE2:
+		case mmGB_TILE_MODE3:
+		case mmGB_TILE_MODE4:
+		case mmGB_TILE_MODE5:
+		case mmGB_TILE_MODE6:
+		case mmGB_TILE_MODE7:
+		case mmGB_TILE_MODE8:
+		case mmGB_TILE_MODE9:
+		case mmGB_TILE_MODE10:
+		case mmGB_TILE_MODE11:
+		case mmGB_TILE_MODE12:
+		case mmGB_TILE_MODE13:
+		case mmGB_TILE_MODE14:
+		case mmGB_TILE_MODE15:
+		case mmGB_TILE_MODE16:
+		case mmGB_TILE_MODE17:
+		case mmGB_TILE_MODE18:
+		case mmGB_TILE_MODE19:
+		case mmGB_TILE_MODE20:
+		case mmGB_TILE_MODE21:
+		case mmGB_TILE_MODE22:
+		case mmGB_TILE_MODE23:
+		case mmGB_TILE_MODE24:
+		case mmGB_TILE_MODE25:
+		case mmGB_TILE_MODE26:
+		case mmGB_TILE_MODE27:
+		case mmGB_TILE_MODE28:
+		case mmGB_TILE_MODE29:
+		case mmGB_TILE_MODE30:
+		case mmGB_TILE_MODE31:
+			idx = (reg_offset - mmGB_TILE_MODE0);
+			return adev->gfx.config.tile_mode_array[idx];
+		case mmGB_MACROTILE_MODE0:
+		case mmGB_MACROTILE_MODE1:
+		case mmGB_MACROTILE_MODE2:
+		case mmGB_MACROTILE_MODE3:
+		case mmGB_MACROTILE_MODE4:
+		case mmGB_MACROTILE_MODE5:
+		case mmGB_MACROTILE_MODE6:
+		case mmGB_MACROTILE_MODE7:
+		case mmGB_MACROTILE_MODE8:
+		case mmGB_MACROTILE_MODE9:
+		case mmGB_MACROTILE_MODE10:
+		case mmGB_MACROTILE_MODE11:
+		case mmGB_MACROTILE_MODE12:
+		case mmGB_MACROTILE_MODE13:
+		case mmGB_MACROTILE_MODE14:
+		case mmGB_MACROTILE_MODE15:
+			idx = (reg_offset - mmGB_MACROTILE_MODE0);
+			return adev->gfx.config.macrotile_mode_array[idx];
+		default:
+			return RREG32(reg_offset);
+		}
+	}
 }
 
 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -605,10 +684,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 			if (reg_offset != asic_register_entry->reg_offset)
 				continue;
 			if (!asic_register_entry->untouched)
-				*value = asic_register_entry->grbm_indexed ?
-					vi_read_indexed_register(adev, se_num,
-								 sh_num, reg_offset) :
-					RREG32(reg_offset);
+				*value = vi_get_register_value(adev,
+							       asic_register_entry->grbm_indexed,
+							       se_num, sh_num, reg_offset);
 			return 0;
 		}
 	}
@@ -618,10 +696,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 			continue;
 
 		if (!vi_allowed_read_registers[i].untouched)
-			*value = vi_allowed_read_registers[i].grbm_indexed ?
-				vi_read_indexed_register(adev, se_num,
-							 sh_num, reg_offset) :
-				RREG32(reg_offset);
+			*value = vi_get_register_value(adev,
+						       vi_allowed_read_registers[i].grbm_indexed,
+						       se_num, sh_num, reg_offset);
 		return 0;
 	}
 	return -EINVAL;
@@ -652,18 +729,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 	return -EINVAL;
 }
 
-static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
-	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
-	if (hung)
-		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-	else
-		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
-	WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
 /**
  * vi_asic_reset - soft reset GPU
  *
@@ -677,11 +742,11 @@ static int vi_asic_reset(struct amdgpu_device *adev)
 {
 	int r;
 
-	vi_set_bios_scratch_engine_hung(adev, true);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	r = vi_gpu_pci_config_reset(adev);
 
-	vi_set_bios_scratch_engine_hung(adev, false);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return r;
 }
@@ -781,734 +846,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
 	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
 }
 
-/* topaz has no DCE, UVD, VCE */
-static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &iceland_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &sdma_v2_4_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &iceland_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &sdma_v2_4_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 10,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_v10_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 5,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v5_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 10,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 5,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v5_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 5,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 10,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &dce_v10_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 5,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 10,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 11,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &dce_v11_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 11,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cz_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 11,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_v11_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-#if defined(CONFIG_DRM_AMD_ACP)
-	{
-		.type = AMD_IP_BLOCK_TYPE_ACP,
-		.major = 2,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &acp_ip_funcs,
-	},
-#endif
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cz_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 11,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-#if defined(CONFIG_DRM_AMD_ACP)
-	{
-		.type = AMD_IP_BLOCK_TYPE_ACP,
-		.major = 2,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &acp_ip_funcs,
-	},
-#endif
-};
-
-int vi_set_ip_blocks(struct amdgpu_device *adev)
-{
-	if (adev->enable_virtual_display) {
-		switch (adev->asic_type) {
-		case CHIP_TOPAZ:
-			adev->ip_blocks = topaz_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
-			break;
-		case CHIP_FIJI:
-			adev->ip_blocks = fiji_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
-			break;
-		case CHIP_TONGA:
-			adev->ip_blocks = tonga_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
-			break;
-		case CHIP_POLARIS11:
-		case CHIP_POLARIS10:
-			adev->ip_blocks = polaris11_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
-			break;
-
-		case CHIP_CARRIZO:
-		case CHIP_STONEY:
-			adev->ip_blocks = cz_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
-			break;
-		default:
-			/* FIXME: not supported yet */
-			return -EINVAL;
-		}
-	} else {
-		switch (adev->asic_type) {
-		case CHIP_TOPAZ:
-			adev->ip_blocks = topaz_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
-			break;
-		case CHIP_FIJI:
-			adev->ip_blocks = fiji_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
-			break;
-		case CHIP_TONGA:
-			adev->ip_blocks = tonga_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
-			break;
-		case CHIP_POLARIS11:
-		case CHIP_POLARIS10:
-			adev->ip_blocks = polaris11_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
-			break;
-		case CHIP_CARRIZO:
-		case CHIP_STONEY:
-			adev->ip_blocks = cz_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
-			break;
-		default:
-			/* FIXME: not supported yet */
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
 #define ATI_REV_ID_FUSE_MACRO__SHIFT        9
 #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
@@ -1593,7 +930,7 @@ static int vi_common_early_init(void *handle)
 		break;
 	case CHIP_TONGA:
 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
-		adev->pg_flags = 0;
+		adev->pg_flags = AMD_PG_SUPPORT_UVD;
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
 	case CHIP_POLARIS11:
@@ -1908,7 +1245,7 @@ static int vi_common_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs vi_common_ip_funcs = {
+static const struct amd_ip_funcs vi_common_ip_funcs = {
 	.name = "vi_common",
 	.early_init = vi_common_early_init,
 	.late_init = NULL,
@@ -1925,3 +1262,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
 	.set_powergating_state = vi_common_set_powergating_state,
 };
 
+static const struct amdgpu_ip_block_version vi_common_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_COMMON,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &vi_common_ip_funcs,
+};
+
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_TOPAZ:
+		/* topaz has no DCE, UVD, VCE */
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
+		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+		break;
+	case CHIP_FIJI:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
+		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+		break;
+	case CHIP_TONGA:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+		break;
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
+		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+		break;
+	case CHIP_CARRIZO:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+		amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+		break;
+	case CHIP_STONEY:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+		amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+	return 0;
+}
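
The rewritten vi_set_ip_blocks() replaces the per-ASIC static tables with incremental
registration, so the virtual-display variants only swap the DCE entry instead of
duplicating nine-entry arrays. A plausible shape for the amdgpu_ip_block_add() helper it
relies on (the real implementation lives elsewhere in this series and may differ):

    /* Sketch: append one IP block descriptor to the device's list. */
    int amdgpu_ip_block_add(struct amdgpu_device *adev,
    			const struct amdgpu_ip_block_version *ip_block_version)
    {
    	if (!ip_block_version)
    		return -EINVAL;

    	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
    	return 0;
    }

Registration order still matters, exactly as the removed "ORDER MATTERS!" comments
warned: blocks are initialized in the order they are added.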
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 5020940..575d7ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,8 +24,6 @@
 #ifndef __VI_H__
 #define __VI_H__
 
-extern const struct amd_ip_funcs vi_common_ip_funcs;
-
 void vi_srbm_select(struct amdgpu_device *adev,
 		    u32 me, u32 pipe, u32 queue, u32 vmid);
 int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bec8125..d198627 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -84,6 +84,28 @@ enum amd_powergating_state {
 	AMD_PG_STATE_UNGATE,
 };
 
+struct amd_vce_state {
+	/* vce clocks */
+	u32 evclk;
+	u32 ecclk;
+	/* gpu clocks */
+	u32 sclk;
+	u32 mclk;
+	u8 clk_idx;
+	u8 pstate;
+};
+
+#define AMD_MAX_VCE_LEVELS 6
+
+enum amd_vce_level {
+	AMD_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+	AMD_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+	AMD_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+	AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+	AMD_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+	AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
 /* CG flags */
 #define AMD_CG_SUPPORT_GFX_MGCG			(1 << 0)
 #define AMD_CG_SUPPORT_GFX_MGLS			(1 << 1)
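
struct amd_vce_state and AMD_MAX_VCE_LEVELS move the VCE state definitions into the
shared header so both the driver core and powerplay use one definition (the
powerplay-private pp_vce_state is removed later in this series). The levels encode the
usual selection policy by power source and encode queue; an illustrative selector, not
taken from the kernel:

    static enum amd_vce_level pick_vce_level(bool on_ac, bool low_latency,
    					 unsigned int height)
    {
    	if (on_ac)
    		return AMD_VCE_LEVEL_AC_ALL;
    	if (low_latency)
    		return height <= 720 ? AMD_VCE_LEVEL_DC_LL_LOW
    				     : AMD_VCE_LEVEL_DC_LL_HIGH;
    	return height <= 720 ? AMD_VCE_LEVEL_DC_GP_LOW
    			     : AMD_VCE_LEVEL_DC_GP_HIGH;
    }

(AMD_VCE_LEVEL_DC_EE would be chosen when only entropy encoding is offloaded; the sketch
ignores that case.)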
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
index 3014d4a5..a9ef156 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
@@ -176,6 +176,8 @@
 #define mmSMU1_SMU_SMC_IND_DATA                                                 0x83
 #define mmSMU2_SMU_SMC_IND_DATA                                                 0x85
 #define mmSMU3_SMU_SMC_IND_DATA                                                 0x87
+#define mmSMC_IND_INDEX_11                                                      0x1AC
+#define mmSMC_IND_DATA_11                                                       0x1AD
 #define ixRCU_UC_EVENTS                                                         0xc0000004
 #define ixRCU_MISC_CTRL                                                         0xc0000010
 #define ixCC_RCU_FUSES                                                          0xc00c0000
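
mmSMC_IND_INDEX_11/mmSMC_IND_DATA_11 form an index/data pair for indirect access to SMC
address space: write the target SMC address to the index register, then read or write
the data register. Roughly, using the driver's 32-bit MMIO accessors (sketch only):

    static u32 smc_ind_read(struct amdgpu_device *adev, u32 smc_addr)
    {
    	WREG32(mmSMC_IND_INDEX_11, smc_addr);	/* select SMC address */
    	return RREG32(mmSMC_IND_DATA_11);	/* read through the window */
    }

Moving the definitions here (and out of smu7_smumgr.h below) keeps all register offsets
for this SMU generation in the generated headers.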
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index 9339174..22dd4c2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -87,6 +87,8 @@
 #define mmSMC_IND_DATA_6                                                        0x8d
 #define mmSMC_IND_INDEX_7                                                       0x8e
 #define mmSMC_IND_DATA_7                                                        0x8f
+#define mmSMC_IND_INDEX_11                                                      0x1AC
+#define mmSMC_IND_DATA_11                                                       0x1AD
 #define mmSMC_IND_ACCESS_CNTL                                                   0x92
 #define mmSMC_MESSAGE_0                                                         0x94
 #define mmSMC_RESP_0                                                            0x95
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index 44b1855..eca2b85 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -90,6 +90,8 @@
 #define mmSMC_IND_DATA_6                                                        0x8d
 #define mmSMC_IND_INDEX_7                                                       0x8e
 #define mmSMC_IND_DATA_7                                                        0x8f
+#define mmSMC_IND_INDEX_11                                                      0x1AC
+#define mmSMC_IND_DATA_11                                                       0x1AD
 #define mmSMC_IND_ACCESS_CNTL                                                   0x92
 #define mmSMC_MESSAGE_0                                                         0x94
 #define mmSMC_RESP_0                                                            0x95
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index df7c18b..e4a1697 100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -106,6 +106,7 @@ enum cgs_ucode_id {
 	CGS_UCODE_ID_CP_MEC_JT2,
 	CGS_UCODE_ID_GMCON_RENG,
 	CGS_UCODE_ID_RLC_G,
+	CGS_UCODE_ID_STORAGE,
 	CGS_UCODE_ID_MAXIMUM,
 };
 
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
 typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
 				struct cgs_system_info *sys_info);
 
+typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
+
 struct cgs_ops {
 	/* memory management calls (similar to KFD interface) */
 	cgs_gpu_mem_info_t gpu_mem_info;
@@ -670,6 +673,7 @@ struct cgs_ops {
 	cgs_call_acpi_method call_acpi_method;
 	/* get system info */
 	cgs_query_system_info query_system_info;
+	cgs_is_virtualization_enabled_t is_virtualization_enabled;
 };
 
 struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -773,4 +777,6 @@ struct cgs_device
 	CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
 	resource_base)
 
+#define cgs_is_virtualization_enabled(cgs_device) \
+		CGS_CALL(is_virtualization_enabled, cgs_device)
 #endif /* _CGS_COMMON_H */
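
Like every other cgs_ops entry, the new callback is reached through the CGS_CALL
dispatch macro. A simplified reconstruction of how that macro presumably resolves the
function pointer (the real definition sits earlier in this header):

    #define CGS_CALL(func, dev, ...) \
    	(((struct cgs_device *)(dev))->ops->func(dev, ##__VA_ARGS__))

so cgs_is_virtualization_enabled(dev) becomes dev->ops->is_virtualization_enabled(dev).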
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7174f7a..0b1f220 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -436,7 +436,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type  state)
 	}
 }
 
-int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+		void *input, void *output)
 {
 	int ret = 0;
 	struct pp_instance *pp_handle;
@@ -475,7 +476,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
 	return ret;
 }
 
-enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 {
 	struct pp_hwmgr *hwmgr;
 	struct pp_power_state *state;
@@ -820,6 +821,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
 	return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
 }
 
+static struct amd_vce_state *
+pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+{
+	struct pp_hwmgr *hwmgr;
+
+	if (handle) {
+		hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+		if (hwmgr && idx < hwmgr->num_vce_state_tables)
+			return &hwmgr->vce_states[idx];
+	}
+
+	return NULL;
+}
+
 const struct amd_powerplay_funcs pp_dpm_funcs = {
 	.get_temperature = pp_dpm_get_temperature,
 	.load_firmware = pp_dpm_load_fw,
@@ -846,6 +862,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
 	.get_mclk_od = pp_dpm_get_mclk_od,
 	.set_mclk_od = pp_dpm_set_mclk_od,
 	.read_sensor = pp_dpm_read_sensor,
+	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
 };
 
 static int amd_pp_instance_init(struct amd_pp_init *pp_init,
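
With get_vce_clock_state wired into pp_dpm_funcs, amdgpu's DPM code can walk the VCE
state table through the powerplay handle. A hedged usage sketch;
amd_powerplay_print_vce_states() is made up for illustration:

    static void amd_powerplay_print_vce_states(struct amd_powerplay *pp)
    {
    	struct amd_vce_state *state;
    	unsigned int i;

    	for (i = 0; i < AMD_MAX_VCE_LEVELS; i++) {
    		state = pp->pp_funcs->get_vce_clock_state(pp->pp_handle, i);
    		if (!state)	/* NULL once idx passes num_vce_state_tables */
    			break;
    		pr_info("vce state %u: evclk %u ecclk %u sclk %u mclk %u\n",
    			i, state->evclk, state->ecclk,
    			state->sclk, state->mclk);
    	}
    }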
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 9604249..4b14f25 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState(
 	return (struct cz_power_state *)hw_ps;
 }
 
-uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
 					uint32_t clock, uint32_t msg)
 {
 	int i = 0;
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
 	return 0;
 }
 
-int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
+static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
 				void *output, void *storage, int result)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 	struct phm_clock_voltage_dependency_table *table =
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
-int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
 {
 	return sizeof(struct cz_power_state);
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 1944d28..f5e8fda 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -25,6 +25,7 @@
 #include "linux/delay.h"
 #include "hwmgr.h"
 #include "amd_acpi.h"
+#include "pp_acpi.h"
 
 bool acpi_atcs_functions_supported(void *device, uint32_t index)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 7de701d..baf0f3d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
 /**
  * Private Function to get the PowerPlay Table Address.
  */
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
 {
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables(
 	return 0;
 }
 
-int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
 {
 	int result = 0;
 	const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
 	return result;
 }
 
-int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
 {
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1211,7 +1211,7 @@ static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
 }
 
 static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
-		struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+		struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag)
 {
 	const ATOM_Tonga_VCE_State_Record *vce_state_record;
 	ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
@@ -1315,7 +1315,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
 
 	hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
 
-	if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+	if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) {
 		for (j = 0; j < i; j++)
 			ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index ccf7ebe..a4e9cf4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-int get_number_of_vce_state_table_entries(
+static int get_number_of_vce_state_table_entries(
 						  struct pp_hwmgr *hwmgr)
 {
 	const ATOM_PPLIB_POWERPLAYTABLE *table =
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries(
 	return 0;
 }
 
-int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
 							unsigned long i,
-							struct pp_vce_state *vce_state,
+							struct amd_vce_state *vce_state,
 							void **clock_info,
 							unsigned long *flag)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6eb6db1..cf2ee93 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_uvd_power_gating(hwmgr)) {
 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_vce_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_vce_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SamuPowerGating))
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SamuPowerGating))
@@ -149,15 +149,21 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	if (bgate) {
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
-				AMD_CG_STATE_GATE);
+				AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_PG_STATE_GATE);
 		smu7_update_uvd_dpm(hwmgr, true);
 		smu7_powerdown_uvd(hwmgr);
 	} else {
 		smu7_powerup_uvd(hwmgr);
-		smu7_update_uvd_dpm(hwmgr, false);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_PG_STATE_UNGATE);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
-				AMD_CG_STATE_UNGATE);
+				AMD_CG_STATE_GATE);
+		smu7_update_uvd_dpm(hwmgr, false);
 	}
 
 	return 0;
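
The reordered hunk makes UVD gating symmetric: on the way down, clocks are ungated
before the block is power gated and powered off; on the way up, power returns before
clock gating is re-enabled and the DPM level is refreshed. As a sequence:

    /* gate:   CG ungate   -> PG gate   -> update DPM -> powerdown UVD
     * ungate: powerup UVD -> PG ungate -> CG gate    -> update DPM
     */

Toggling clock gating while the block was still power gated is what the old ordering
risked.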
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 609996c..073e0bf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC {
 
 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
 
-struct smu7_power_state *cast_phw_smu7_power_state(
+static struct smu7_power_state *cast_phw_smu7_power_state(
 				  struct pp_hw_power_state *hw_ps)
 {
 	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state(
 	return (struct smu7_power_state *)hw_ps;
 }
 
-const struct smu7_power_state *cast_const_phw_smu7_power_state(
+static const struct smu7_power_state *cast_const_phw_smu7_power_state(
 				 const struct pp_hw_power_state *hw_ps)
 {
 	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state(
  * @param    hwmgr  the address of the powerplay hardware manager.
  * @return   always 0
  */
-int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
 {
 	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
 
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
 {
 	uint32_t speedCntl = 0;
 
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
 			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
 }
 
-int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 {
 	uint32_t link_width;
 
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 * @param    pHwMgr  the address of the powerplay hardware manager.
 * @return   always PP_Result_OK
 */
-int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
 {
 	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
@@ -1153,7 +1153,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
 	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
 }
 
-int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	data->pcie_performance_request = true;
@@ -1161,7 +1161,7 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result = 0;
 	int result = 0;
@@ -1352,6 +1352,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct cgs_system_info sys_info = {0};
+	int result;
 
 	data->dll_default_on = false;
 	data->mclk_dpm0_activity_target = 0xa;
@@ -1439,6 +1441,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	data->pcie_lane_performance.min = 16;
 	data->pcie_lane_power_saving.max = 0;
 	data->pcie_lane_power_saving.min = 16;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_VCEPowerGating);
+	}
 }
 
 /**
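
Querying the PG flags through cgs_query_system_info() is the standard way for powerplay
to read amdgpu state without a direct dependency, and the same idiom serves any
CGS_SYSTEM_INFO_* id. A generic helper following the pattern above (illustrative, not a
kernel function):

    static uint64_t query_sys_value(void *cgs_device, enum cgs_system_info_id id)
    {
    	struct cgs_system_info info = {0};

    	info.size = sizeof(struct cgs_system_info);
    	info.info_id = id;
    	if (cgs_query_system_info(cgs_device, &info))
    		return 0;	/* treat a failed query as "no flags set" */
    	return info.value;
    }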
@@ -1864,7 +1878,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
 {
 	struct phm_ppt_v1_information *table_info =
 		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2253,7 +2267,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data;
 	int result;
@@ -3672,14 +3686,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
 			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
 }
 
-int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
 	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
 
 	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
 }
 
-int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 {
 	uint32_t num_active_displays = 0;
 	struct cgs_display_info info = {0};
@@ -3701,7 +3717,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always OK
 */
-int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	uint32_t num_active_displays = 0;
@@ -3751,7 +3767,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	return smu7_program_display_gap(hwmgr);
 }
@@ -3775,13 +3791,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
 			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
 }
 
-int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
 					const void *thermal_interrupt_info)
 {
 	return 0;
 }
 
-bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	bool is_update_required = false;
@@ -3810,7 +3827,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
 		  (pl1->pcie_lane == pl2->pcie_lane));
 }
 
-int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
+		const struct pp_hw_power_state *pstate1,
+		const struct pp_hw_power_state *pstate2, bool *equal)
 {
 	const struct smu7_power_state *psa;
 	const struct smu7_power_state *psb;
@@ -3843,7 +3862,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
 	return 0;
 }
 
-int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
@@ -3972,7 +3991,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 3fb5e57..eb3e83d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -359,6 +359,7 @@ struct amd_powerplay_funcs {
 	int (*get_mclk_od)(void *handle);
 	int (*set_mclk_od)(void *handle, uint32_t value);
 	int (*read_sensor)(void *handle, int idx, int32_t *value);
+	struct amd_vce_state *(*get_vce_clock_state)(void *handle, unsigned idx);
 };
 
 struct amd_powerplay {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 4f0fedd..e38b999 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -367,7 +367,7 @@ struct pp_table_func {
 	int (*pptable_get_vce_state_table_entry)(
 						struct pp_hwmgr *hwmgr,
 						unsigned long i,
-						struct pp_vce_state *vce_state,
+						struct amd_vce_state *vce_state,
 						void **clock_info,
 						unsigned long *flag);
 };
@@ -586,18 +586,6 @@ struct phm_microcode_version_info {
 	uint32_t NB;
 };
 
-#define PP_MAX_VCE_LEVELS 6
-
-enum PP_VCE_LEVEL {
-	PP_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
-	PP_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
-	PP_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
-	PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
-	PP_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
-	PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-
 enum PP_TABLE_VERSION {
 	PP_TABLE_V0 = 0,
 	PP_TABLE_V1,
@@ -620,7 +608,7 @@ struct pp_hwmgr {
 	void *hardcode_pp_table;
 	bool need_pp_table_upload;
 
-	struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+	struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
 	uint32_t num_vce_state_tables;
 
 	enum amd_dpm_forced_level dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 9ceaed9..827860f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -156,15 +156,6 @@ struct pp_power_state {
 	struct pp_hw_power_state  hardware;
 };
 
-
-/*Structure to hold a VCE state entry*/
-struct pp_vce_state {
-	uint32_t evclk;
-	uint32_t ecclk;
-	uint32_t sclk;
-	uint32_t mclk;
-};
-
 enum PP_MMProfilingState {
 	PP_MMProfilingState_NA = 0,
 	PP_MMProfilingState_Started,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3df5de2..8fe8ba9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -21,9 +21,6 @@
  *
  */
 
-extern bool acpi_atcs_functions_supported(void *device,
-							uint32_t index);
-extern int acpi_pcie_perf_request(void *device,
-						uint8_t perf_req,
-						bool advertise);
-extern bool acpi_atcs_notify_pcie_device_ready(void *device);
+bool acpi_atcs_functions_supported(void *device, uint32_t index);
+int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
+bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 76310ac..34523fe 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -2049,7 +2049,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
@@ -2125,7 +2125,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
 	}
-	printk("cant't get the offset of type %x member %x \n", type, member);
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
 	return 0;
 }
 
@@ -2150,7 +2150,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
 		return SMU73_MAX_LEVELS_MVDD;
 	}
 
-	printk("cant't get the mac of %x \n", value);
+	printk(KERN_WARNING "can't get the mac of %x\n", value);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 02fe1df..b86e48f 100755
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
 	return result;
 }
 
-int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
 {
 	int i, result = -1;
 	uint32_t reg, data;
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
 	return result;
 }
 
-int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
+static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
 {
 	int result = 0;
 	uint32_t table_start;
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
 	return result;
 }
 
-int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 {
 	int32_t vr_config;
 	uint32_t table_start;
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 }
 
 /* Work in Progress */
-int fiji_restore_vft_table(struct pp_smumgr *smumgr)
+static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
 {
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr)
 }
 
 /* Work in Progress */
-int fiji_save_vft_table(struct pp_smumgr *smumgr)
+static int fiji_save_vft_table(struct pp_smumgr *smumgr)
 {
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr)
 		return -EINVAL;
 }
 
-int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
 {
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index 8c889ca..b579f0c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2140,7 +2140,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
 	}
-	printk("cant't get the offset of type %x member %x \n", type, member);
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
 	return 0;
 }
 
@@ -2163,7 +2163,7 @@ uint32_t iceland_get_mac_definition(uint32_t value)
 		return SMU71_MAX_LEVELS_MVDD;
 	}
 
-	printk("cant't get the mac of %x \n", value);
+	printk(KERN_WARNING "can't get the mac of %x\n", value);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 4ccc0b7..006b220 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -2174,7 +2174,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
 	}
-	printk("cant't get the offset of type %x member %x \n", type, member);
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
 	return 0;
 }
 
@@ -2201,7 +2201,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
 		return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
 	}
 
-	printk("cant't get the mac of %x \n", value);
+	printk(KERN_WARNING "can't get the mac of %x\n", value);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5c3598a..f38a687 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
 }
 
 
-int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 {
 	uint32_t vr_config;
 	uint32_t dpm_table_start;
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 	return 0;
 }
 
-int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+static int
+polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
 {
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 6af744f..6df0d6e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
 	case UCODE_ID_RLC_G:
 		result = CGS_UCODE_ID_RLC_G;
 		break;
+	case UCODE_ID_MEC_STORAGE:
+		result = CGS_UCODE_ID_STORAGE;
+		break;
 	default:
 		break;
 	}
@@ -452,6 +455,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
 				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
 				"Failed to Get Firmware Entry.", return -EINVAL);
+	if (cgs_is_virtualization_enabled(smumgr->device))
+		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
 
 	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
 	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
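
A bare `return -EINVAL` can appear as the third argument because PP_ASSERT_WITH_CODE()
is a do/while macro: when the condition fails it logs the message and then executes the
supplied statement. An approximation of the macro (the real one lives in pp_debug.h):

    #define PP_ASSERT_WITH_CODE(cond, msg, code)		\
    	do {						\
    		if (!(cond)) {				\
    			printk("%s\n", (msg));		\
    			code;				\
    		}					\
    	} while (0)

The MEC storage entry is only added to the firmware table of contents when
virtualization is enabled, since only the virtualized path needs that firmware resident.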
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 76352f2..919be43 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -28,8 +28,6 @@
 #include <pp_endian.h>
 
 #define SMC_RAM_END 0x40000
-#define mmSMC_IND_INDEX_11                              0x01AC
-#define mmSMC_IND_DATA_11                               0x01AD
 
 struct smu7_buffer_entry {
 	uint32_t data_size;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index de2a24d..d08f6f1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -2651,7 +2651,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
 	}
-	printk("cant't get the offset of type %x member %x\n", type, member);
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
 	return 0;
 }
 
@@ -2675,7 +2675,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
 	case SMU_MAX_LEVELS_MVDD:
 		return SMU72_MAX_LEVELS_MVDD;
 	}
-	printk("cant't get the mac value %x\n", value);
+	printk(KERN_WARNING "can't get the mac value %x\n", value);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c..dbd4fd3a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
 	    TP_STRUCT__entry(
 			     __field(struct amd_sched_entity *, entity)
 			     __field(struct amd_sched_job *, sched_job)
-			     __field(struct fence *, fence)
+			     __field(struct dma_fence *, fence)
 			     __field(const char *, name)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
 	    TP_PROTO(struct amd_sched_fence *fence),
 	    TP_ARGS(fence),
 	    TP_STRUCT__entry(
-		    __field(struct fence *, fence)
+		    __field(struct dma_fence *, fence)
 		    ),
 
 	    TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d..5364e6a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,7 +32,7 @@
 
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
 struct kmem_cache *sched_fence_slab;
 atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 		return r;
 
 	atomic_set(&entity->fence_seq, 0);
-	entity->fence_context = fence_context_alloc(2);
+	entity->fence_context = dma_fence_context_alloc(2);
 
 	return 0;
 }
@@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 	kfifo_free(&entity->job_queue);
 }
 
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_entity *entity =
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
-	fence_put(f);
+	dma_fence_put(f);
 	amd_sched_wakeup(entity->sched);
 }
 
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_entity *entity =
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
-	fence_put(f);
+	dma_fence_put(f);
 }
 
 static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
-	struct fence * fence = entity->dependency;
+	struct dma_fence *fence = entity->dependency;
 	struct amd_sched_fence *s_fence;
 
 	if (fence->context == entity->fence_context) {
 		/* We can ignore fences from ourself */
-		fence_put(entity->dependency);
+		dma_fence_put(entity->dependency);
 		return false;
 	}
 
@@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 		 * Fence is from the same scheduler, only need to wait for
 		 * it to be scheduled
 		 */
-		fence = fence_get(&s_fence->scheduled);
-		fence_put(entity->dependency);
+		fence = dma_fence_get(&s_fence->scheduled);
+		dma_fence_put(entity->dependency);
 		entity->dependency = fence;
-		if (!fence_add_callback(fence, &entity->cb,
-					amd_sched_entity_clear_dep))
+		if (!dma_fence_add_callback(fence, &entity->cb,
+					    amd_sched_entity_clear_dep))
 			return true;
 
 		/* Ignore it when it is already scheduled */
-		fence_put(fence);
+		dma_fence_put(fence);
 		return false;
 	}
 
-	if (!fence_add_callback(entity->dependency, &entity->cb,
-				amd_sched_entity_wakeup))
+	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+				    amd_sched_entity_wakeup))
 		return true;
 
-	fence_put(entity->dependency);
+	dma_fence_put(entity->dependency);
 	return false;
 }
 
@@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work)
 	sched->ops->free_job(s_job);
 }
 
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
 {
 	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
 						 finish_cb);
@@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 
 	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
-		if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
-			fence_put(s_job->s_fence->parent);
+		if (dma_fence_remove_callback(s_job->s_fence->parent,
+					      &s_job->s_fence->cb)) {
+			dma_fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
 		}
 	}
@@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct amd_sched_fence *s_fence = s_job->s_fence;
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		spin_unlock(&sched->job_list_lock);
 		fence = sched->ops->run_job(s_job);
 		atomic_inc(&sched->hw_rq_count);
 		if (fence) {
-			s_fence->parent = fence_get(fence);
-			r = fence_add_callback(fence, &s_fence->cb,
-					       amd_sched_process_job);
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
+						   amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			fence_put(fence);
+			dma_fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
 			amd_sched_process_job(NULL, &s_fence->cb);
@@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
 	trace_amd_sched_job(sched_job);
-	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
-			   amd_sched_job_finish_cb);
+	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+			       amd_sched_job_finish_cb);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 }
@@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 	return entity;
 }
 
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_fence *s_fence =
 		container_of(cb, struct amd_sched_fence, cb);
@@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	amd_sched_fence_finished(s_fence);
 
 	trace_amd_sched_process_job(s_fence);
-	fence_put(&s_fence->finished);
+	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
@@ -547,7 +548,7 @@ static int amd_sched_main(void *param)
 		struct amd_sched_entity *entity = NULL;
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
 					 (!amd_sched_blocked(sched) &&
@@ -569,15 +570,15 @@ static int amd_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
-			s_fence->parent = fence_get(fence);
-			r = fence_add_callback(fence, &s_fence->cb,
-					       amd_sched_process_job);
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
+						   amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			fence_put(fence);
+			dma_fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
 			amd_sched_process_job(NULL, &s_fence->cb);
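The get/add_callback/put sequence above appears in both amd_sched_job_recovery() and
amd_sched_main(); a minimal sketch of the renamed dma-fence idiom (struct my_fence and
my_job_done are illustrative names, not part of this patch):

	#include <linux/dma-fence.h>

	struct my_fence {
		struct dma_fence *parent;
		struct dma_fence_cb cb;
	};

	/* Runs once the hardware fence signals, possibly in IRQ context. */
	static void my_job_done(struct dma_fence *f, struct dma_fence_cb *cb)
	{
	}

	static void my_track_job(struct my_fence *s, struct dma_fence *fence)
	{
		int r;

		s->parent = dma_fence_get(fence);	/* our own reference */
		r = dma_fence_add_callback(fence, &s->cb, my_job_done);
		if (r == -ENOENT)			/* already signaled */
			my_job_done(fence, &s->cb);
		else if (r)
			pr_err("fence add callback failed (%d)\n", r);
		dma_fence_put(fence);		/* drop run_job()'s reference */
	}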
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb..876aa43 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,7 +25,7 @@
 #define _GPU_SCHEDULER_H_
 
 #include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
@@ -50,8 +50,8 @@ struct amd_sched_entity {
 	atomic_t			fence_seq;
 	uint64_t                        fence_context;
 
-	struct fence			*dependency;
-	struct fence_cb			cb;
+	struct dma_fence		*dependency;
+	struct dma_fence_cb		cb;
 };
 
 /**
@@ -66,10 +66,10 @@ struct amd_sched_rq {
 };
 
 struct amd_sched_fence {
-	struct fence                    scheduled;
-	struct fence                    finished;
-	struct fence_cb                 cb;
-	struct fence                    *parent;
+	struct dma_fence                scheduled;
+	struct dma_fence                finished;
+	struct dma_fence_cb             cb;
+	struct dma_fence                *parent;
 	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void                            *owner;
@@ -79,15 +79,15 @@ struct amd_sched_job {
 	struct amd_gpu_scheduler        *sched;
 	struct amd_sched_entity         *s_entity;
 	struct amd_sched_fence          *s_fence;
-	struct fence_cb			finish_cb;
+	struct dma_fence_cb		finish_cb;
 	struct work_struct		finish_work;
 	struct list_head		node;
 	struct delayed_work		work_tdr;
 };
 
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
 {
 	if (f->ops == &amd_sched_fence_ops_scheduled)
 		return container_of(f, struct amd_sched_fence, scheduled);
@@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
  * these functions should be implemented on the driver side
 */
 struct amd_sched_backend_ops {
-	struct fence *(*dependency)(struct amd_sched_job *sched_job);
-	struct fence *(*run_job)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
 	void (*timedout_job)(struct amd_sched_job *sched_job);
 	void (*free_job)(struct amd_sched_job *sched_job);
 };
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63bea..c26fa29 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
-	fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
-		   &fence->lock, entity->fence_context, seq);
-	fence_init(&fence->finished, &amd_sched_fence_ops_finished,
-		   &fence->lock, entity->fence_context + 1, seq);
+	dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+		       &fence->lock, entity->fence_context, seq);
+	dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+		       &fence->lock, entity->fence_context + 1, seq);
 
 	return fence;
 }
 
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
 {
-	int ret = fence_signal(&fence->scheduled);
+	int ret = dma_fence_signal(&fence->scheduled);
 
 	if (!ret)
-		FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"signaled from irq context\n");
 	else
-		FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"was already signaled\n");
 }
 
 void amd_sched_fence_finished(struct amd_sched_fence *fence)
 {
-	int ret = fence_signal(&fence->finished);
+	int ret = dma_fence_signal(&fence->finished);
 
 	if (!ret)
-		FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+		DMA_FENCE_TRACE(&fence->finished,
+				"signaled from irq context\n");
 	else
-		FENCE_TRACE(&fence->finished, "was already signaled\n");
+		DMA_FENCE_TRACE(&fence->finished,
+				"was already signaled\n");
 }
 
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amd_sched";
 }
 
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 	return (const char *)fence->sched->name;
 }
 
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
 {
 	return true;
 }
@@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
  */
 static void amd_sched_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
-	fence_put(fence->parent);
+	dma_fence_put(fence->parent);
 	kmem_cache_free(sched_fence_slab, fence);
 }
 
@@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
@@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
  *
  * Drop the extra reference from the scheduled fence to the base fence.
  */
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
-	fence_put(&fence->scheduled);
+	dma_fence_put(&fence->scheduled);
 }
 
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amd_sched_fence_release_scheduled,
 };
 
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amd_sched_fence_release_finished,
 };
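The conversion above shows everything a fence provider supplies under the renamed API:
the two name hooks, enable_signaling, and (typically) dma_fence_default_wait. A hedged
sketch with illustrative "mydrv" names:

	#include <linux/dma-fence.h>

	static const char *mydrv_get_driver_name(struct dma_fence *f)
	{
		return "mydrv";
	}

	static const char *mydrv_get_timeline_name(struct dma_fence *f)
	{
		return "mydrv-ring0";
	}

	static bool mydrv_enable_signaling(struct dma_fence *f)
	{
		return true;	/* no extra hardware setup needed */
	}

	static const struct dma_fence_ops mydrv_fence_ops = {
		.get_driver_name = mydrv_get_driver_name,
		.get_timeline_name = mydrv_get_timeline_name,
		.enable_signaling = mydrv_enable_signaling,
		.wait = dma_fence_default_wait,
	};

Individual fences are then initialized with dma_fence_init() on a context obtained from
dma_fence_context_alloc(), exactly as the scheduler does for its scheduled/finished pair.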
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 48019ae..bbaa55a 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -223,14 +223,12 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
 {
 	struct hdlcd_drm_private *hdlcd;
 	struct drm_gem_cma_object *gem;
-	unsigned int depth, bpp;
 	u32 src_w, src_h, dest_w, dest_h;
 	dma_addr_t scanout_start;
 
 	if (!plane->state->fb)
 		return;
 
-	drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
 	src_w = plane->state->src_w >> 16;
 	src_h = plane->state->src_h >> 16;
 	dest_w = plane->state->crtc_w;
@@ -238,7 +236,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
 	gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
 	scanout_start = gem->paddr + plane->state->fb->offsets[0] +
 		plane->state->crtc_y * plane->state->fb->pitches[0] +
-		plane->state->crtc_x * bpp / 8;
+		plane->state->crtc_x *
+		drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
 
 	hdlcd = plane->dev->dev_private;
 	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
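This hunk, like the malidp_hw.c one below, drops drm_fb_get_bpp_depth() in favour of
drm_format_plane_cpp(), which yields bytes per pixel directly; a sketch of the resulting
offset math (fb_byte_offset is a hypothetical helper):

	#include <drm/drm_fourcc.h>
	#include <drm/drm_framebuffer.h>

	/* Byte offset of pixel (x, y) in plane 0: cpp replaces bpp / 8. */
	static u32 fb_byte_offset(const struct drm_framebuffer *fb, u32 x, u32 y)
	{
		return fb->offsets[0] + y * fb->pitches[0] +
		       x * drm_format_plane_cpp(fb->pixel_format, 0);
	}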
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index fb6a418..6477d1a 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev)
 		return -EAGAIN;
 	}
 
-	component_match_add(&pdev->dev, &match, compare_dev, port);
+	drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+	of_node_put(port);
 
 	return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
 					       match);
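drm_of_component_match_add(), used here and in the malidp and armada hunks below, takes
its own reference on the OF node and releases it with the match, so callers may drop
theirs immediately; a sketch (compare_of and my_add_port are illustrative):

	#include <drm/drm_of.h>
	#include <linux/component.h>
	#include <linux/of.h>

	static int compare_of(struct device *dev, void *data)
	{
		return dev->of_node == data;
	}

	static void my_add_port(struct device *dev,
				struct component_match **match,
				struct device_node *port)
	{
		/* the helper pins the node for the match's lifetime */
		drm_of_component_match_add(dev, match, compare_of, port);
		of_node_put(port);	/* safe to drop ours right away */
	}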
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 9280358..9f47394 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -493,7 +493,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
 		return -EAGAIN;
 	}
 
-	component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+	drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
+				   port);
+	of_node_put(port);
 	return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
 					       match);
 }
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index a6132f1..be815d0 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -198,9 +198,6 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
 
 static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
 {
-	unsigned int depth;
-	int bpp;
-
 	/* RGB888 or BGR888 can't be rotated */
 	if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
 		return -EINVAL;
@@ -210,9 +207,7 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
 	 * worth of pixel data. Required size is then:
 	 *    size = rotated_width * (bpp / 8) * 8;
 	 */
-	drm_fb_get_bpp_depth(fmt, &depth, &bpp);
-
-	return w * bpp;
+	return w * drm_format_plane_cpp(fmt, 0) * 8;
 }
 
 static int malidp550_query_hw(struct malidp_hw_device *hwdev)
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 82c193e..abaca03 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -254,21 +254,18 @@ int malidp_de_planes_init(struct drm_device *drm)
 		if (ret < 0)
 			goto cleanup;
 
-		if (!drm->mode_config.rotation_property) {
+		/* SMART layer can't be rotated */
+		if (id != DE_SMART) {
 			unsigned long flags = DRM_ROTATE_0 |
 					      DRM_ROTATE_90 |
 					      DRM_ROTATE_180 |
 					      DRM_ROTATE_270 |
 					      DRM_REFLECT_X |
 					      DRM_REFLECT_Y;
-			drm->mode_config.rotation_property =
-				drm_mode_create_rotation_property(drm, flags);
+			drm_plane_create_rotation_property(&plane->base,
+							   DRM_ROTATE_0,
+							   flags);
 		}
-		/* SMART layer can't be rotated */
-		if (drm->mode_config.rotation_property && (id != DE_SMART))
-			drm_object_attach_property(&plane->base.base,
-						   drm->mode_config.rotation_property,
-						   DRM_ROTATE_0);
 
 		drm_plane_helper_add(&plane->base,
 				     &malidp_de_plane_helper_funcs);
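Rotation becomes a per-plane property here: each plane that can rotate creates its own
property with exactly the supported mask, and non-rotatable planes (the SMART layer)
simply skip the call. A minimal sketch:

	#include <drm/drm_blend.h>

	static int my_plane_init_rotation(struct drm_plane *plane)
	{
		return drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
							  DRM_ROTATE_0 |
							  DRM_ROTATE_90 |
							  DRM_ROTATE_180 |
							  DRM_ROTATE_270 |
							  DRM_REFLECT_X |
							  DRM_REFLECT_Y);
	}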
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 1e0e68f..94e46da 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev,
 			continue;
 		}
 
-		component_match_add(dev, match, compare_of, remote);
+		drm_of_component_match_add(dev, match, compare_of, remote);
 		of_node_put(remote);
 	}
 }
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 608df4c..7134fdf 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
 	.ttm_tt_populate = ast_ttm_tt_populate,
 	.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
 	.init_mem_type = ast_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = ast_bo_evict_flags,
 	.move = NULL,
 	.verify_access = ast_bo_verify_access,
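TTM's eviction decision is a driver hook in this tree; drivers with no special policy,
as here and in the bochs hunk below, wire up the default helper. A sketch showing only
the new member:

	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_bo_driver.h>

	static struct ttm_bo_driver mydrv_bo_driver = {
		/* remaining callbacks omitted for brevity */
		.eviction_valuable = ttm_bo_eviction_valuable,
	};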
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 5f48431..9f62228 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -464,7 +464,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
 
 	drm_atomic_helper_cleanup_planes(dev, old_state);
 
-	drm_atomic_state_free(old_state);
+	drm_atomic_state_put(old_state);
 
 	/* Complete the commit, wake up any waiter. */
 	spin_lock(&dc->commit.wait.lock);
@@ -521,6 +521,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
 	/* Swap the state, this is the point of no return. */
 	drm_atomic_helper_swap_state(state, true);
 
+	drm_atomic_state_get(state);
 	if (async)
 		queue_work(dc->wq, &commit->work);
 	else
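struct drm_atomic_state is reference counted in this tree: a commit that hands the state
to a worker takes a reference first, and the completion path puts it instead of calling
the removed drm_atomic_state_free(). A sketch with hypothetical names:

	#include <drm/drm_atomic.h>
	#include <drm/drm_atomic_helper.h>
	#include <linux/workqueue.h>

	static void my_complete(struct drm_atomic_state *old_state)
	{
		/* hardware programming and plane cleanup happen here */
		drm_atomic_state_put(old_state);	/* the worker's reference */
	}

	static void my_commit(struct drm_atomic_state *state,
			      struct workqueue_struct *wq,
			      struct work_struct *work)
	{
		drm_atomic_helper_swap_state(state, true);
		drm_atomic_state_get(state);	/* keep it alive for the worker */
		queue_work(wq, work);
	}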
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 9d4c030..246ed1e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
 
 	if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
 	     state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
-	    (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
+	    drm_rotation_90_or_270(state->base.rotation))
 		cfg |= ATMEL_HLCDC_YUV422ROT;
 
 	atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
 	/*
 	 * Swap width and size in case of 90 or 270 degrees rotation
 	 */
-	if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+	if (drm_rotation_90_or_270(state->base.rotation)) {
 		tmp = state->crtc_w;
 		state->crtc_w = state->crtc_h;
 		state->crtc_h = tmp;
@@ -883,9 +883,9 @@ static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
 	return 0;
 }
 
-static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
-				const struct atmel_hlcdc_layer_desc *desc,
-				struct atmel_hlcdc_plane_properties *props)
+static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
+					     const struct atmel_hlcdc_layer_desc *desc,
+					     struct atmel_hlcdc_plane_properties *props)
 {
 	struct regmap *regmap = plane->layer.hlcdc->regmap;
 
@@ -902,10 +902,18 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
 				ATMEL_HLCDC_LAYER_GA_MASK);
 	}
 
-	if (desc->layout.xstride && desc->layout.pstride)
-		drm_object_attach_property(&plane->base.base,
-				plane->base.dev->mode_config.rotation_property,
-				DRM_ROTATE_0);
+	if (desc->layout.xstride && desc->layout.pstride) {
+		int ret;
+
+		ret = drm_plane_create_rotation_property(&plane->base,
+							 DRM_ROTATE_0,
+							 DRM_ROTATE_0 |
+							 DRM_ROTATE_90 |
+							 DRM_ROTATE_180 |
+							 DRM_ROTATE_270);
+		if (ret)
+			return ret;
+	}
 
 	if (desc->layout.csc) {
 		/*
@@ -925,6 +933,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
 			     ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 2),
 			     0x40040890);
 	}
+
+	return 0;
 }
 
 static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
@@ -1036,7 +1046,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
 			     &atmel_hlcdc_layer_plane_helper_funcs);
 
 	/* Set default property values */
-	atmel_hlcdc_plane_init_properties(plane, desc, props);
+	ret = atmel_hlcdc_plane_init_properties(plane, desc, props);
+	if (ret)
+		return ERR_PTR(ret);
 
 	return plane;
 }
@@ -1054,15 +1066,6 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
 	if (!props->alpha)
 		return ERR_PTR(-ENOMEM);
 
-	dev->mode_config.rotation_property =
-			drm_mode_create_rotation_property(dev,
-							  DRM_ROTATE_0 |
-							  DRM_ROTATE_90 |
-							  DRM_ROTATE_180 |
-							  DRM_ROTATE_270);
-	if (!dev->mode_config.rotation_property)
-		return ERR_PTR(-ENOMEM);
-
 	return props;
 }
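The open-coded (DRM_ROTATE_90 | DRM_ROTATE_270) tests above now go through
drm_rotation_90_or_270(), whose usual consumer swaps the destination dimensions;
a sketch:

	#include <drm/drm_blend.h>
	#include <linux/kernel.h>

	static void my_fixup_size(unsigned int rotation, u32 *w, u32 *h)
	{
		if (drm_rotation_90_or_270(rotation))
			swap(*w, *h);	/* quarter turn: width <-> height */
	}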
 
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 269cfca9..099a3c6 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
 	.ttm_tt_populate = ttm_pool_populate,
 	.ttm_tt_unpopulate = ttm_pool_unpopulate,
 	.init_mem_type = bochs_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = bochs_bo_evict_flags,
 	.move = NULL,
 	.verify_access = bochs_bo_verify_access,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 10e12e7..bd6acc8 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -57,6 +57,13 @@
 	---help---
 	  Parade eDP-LVDS bridge chip driver.
 
+config DRM_SIL_SII8620
+	tristate "Silicon Image SII8620 HDMI/MHL bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	help
+	  Silicon Image SII8620 HDMI/MHL bridge chip driver.
+
 config DRM_SII902X
 	tristate "Silicon Image sii902x RGB/HDMI bridge"
 	depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cdf3a3c..97ed1a5 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
 obj-$(CONFIG_DRM_SII902X) += sii902x.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
new file mode 100644
index 0000000..b2c267d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -0,0 +1,1564 @@
+/*
+ * Silicon Image SiI8620 HDMI/MHL bridge driver
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "sil-sii8620.h"
+
+#define VAL_RX_HDMI_CTRL2_DEFVAL	VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+
+enum sii8620_mode {
+	CM_DISCONNECTED,
+	CM_DISCOVERY,
+	CM_MHL1,
+	CM_MHL3,
+	CM_ECBUS_S
+};
+
+enum sii8620_sink_type {
+	SINK_NONE,
+	SINK_HDMI,
+	SINK_DVI
+};
+
+enum sii8620_mt_state {
+	MT_STATE_READY,
+	MT_STATE_BUSY,
+	MT_STATE_DONE
+};
+
+struct sii8620 {
+	struct drm_bridge bridge;
+	struct device *dev;
+	struct clk *clk_xtal;
+	struct gpio_desc *gpio_reset;
+	struct gpio_desc *gpio_int;
+	struct regulator_bulk_data supplies[2];
+	struct mutex lock; /* context lock, protects fields below */
+	int error;
+	enum sii8620_mode mode;
+	enum sii8620_sink_type sink_type;
+	u8 cbus_status;
+	u8 stat[MHL_DST_SIZE];
+	u8 xstat[MHL_XDS_SIZE];
+	u8 devcap[MHL_DCAP_SIZE];
+	u8 xdevcap[MHL_XDC_SIZE];
+	u8 avif[19];
+	struct edid *edid;
+	unsigned int gen2_write_burst:1;
+	enum sii8620_mt_state mt_state;
+	struct list_head mt_queue;
+};
+
+struct sii8620_mt_msg;
+
+typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
+				  struct sii8620_mt_msg *msg);
+
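+/*
+ * Queued CBUS message: reg[] holds the command and its arguments, ret
+ * the peer's response byte; send is called when the message is issued,
+ * recv when the response arrives.
+ */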
+struct sii8620_mt_msg {
+	struct list_head node;
+	u8 reg[4];
+	u8 ret;
+	sii8620_mt_msg_cb send;
+	sii8620_mt_msg_cb recv;
+};
+
+static const u8 sii8620_i2c_page[] = {
+	0x39, /* Main System */
+	0x3d, /* TDM and HSIC */
+	0x49, /* TMDS Receiver, MHL EDID */
+	0x4d, /* eMSC, HDCP, HSIC */
+	0x5d, /* MHL Spec */
+	0x64, /* MHL CBUS */
+	0x59, /* Hardware TPI (Transmitter Programming Interface) */
+	0x61, /* eCBUS-S, eCBUS-D */
+};
+
+static void sii8620_fetch_edid(struct sii8620 *ctx);
+static void sii8620_set_upstream_edid(struct sii8620 *ctx);
+static void sii8620_enable_hpd(struct sii8620 *ctx);
+static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+
+static int sii8620_clear_error(struct sii8620 *ctx)
+{
+	int ret = ctx->error;
+
+	ctx->error = 0;
+	return ret;
+}
+
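+/*
+ * Register addresses are 16-bit: the high byte selects the I2C page
+ * (an index into sii8620_i2c_page[]), the low byte is the register
+ * offset within that page.
+ */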
+static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
+{
+	struct device *dev = ctx->dev;
+	struct i2c_client *client = to_i2c_client(dev);
+	u8 data = addr;
+	struct i2c_msg msg[] = {
+		{
+			.addr = sii8620_i2c_page[addr >> 8],
+			.flags = client->flags,
+			.len = 1,
+			.buf = &data
+		},
+		{
+			.addr = sii8620_i2c_page[addr >> 8],
+			.flags = client->flags | I2C_M_RD,
+			.len = len,
+			.buf = buf
+		},
+	};
+	int ret;
+
+	if (ctx->error)
+		return;
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+	if (ret != 2) {
+		dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n",
+			addr, len, ret);
+		ctx->error = ret < 0 ? ret : -EIO;
+	}
+}
+
+static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
+{
+	u8 ret;
+
+	sii8620_read_buf(ctx, addr, &ret, 1);
+	return ret;
+}
+
+static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf,
+			      int len)
+{
+	struct device *dev = ctx->dev;
+	struct i2c_client *client = to_i2c_client(dev);
+	u8 data[2];
+	struct i2c_msg msg = {
+		.addr = sii8620_i2c_page[addr >> 8],
+		.flags = client->flags,
+		.len = len + 1,
+	};
+	int ret;
+
+	if (ctx->error)
+		return;
+
+	if (len > 1) {
+		msg.buf = kmalloc(len + 1, GFP_KERNEL);
+		if (!msg.buf) {
+			ctx->error = -ENOMEM;
+			return;
+		}
+		memcpy(msg.buf + 1, buf, len);
+	} else {
+		msg.buf = data;
+		msg.buf[1] = *buf;
+	}
+
+	msg.buf[0] = addr;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+	if (ret != 1) {
+		dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n",
+			addr, len, buf, ret);
+		ctx->error = ret ?: -EIO;
+	}
+
+	if (len > 1)
+		kfree(msg.buf);
+}
+
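+/*
+ * Write a literal list of byte values to consecutive registers starting
+ * at addr; the statement expression gathers the variadic arguments into
+ * a temporary array for sii8620_write_buf().
+ */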
+#define sii8620_write(ctx, addr, arr...) \
+({\
+	u8 d[] = { arr }; \
+	sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \
+})
+
+static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i += 2)
+		sii8620_write(ctx, seq[i], seq[i + 1]);
+}
+
+#define sii8620_write_seq(ctx, seq...) \
+({\
+	const u16 d[] = { seq }; \
+	__sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define sii8620_write_seq_static(ctx, seq...) \
+({\
+	static const u16 d[] = { seq }; \
+	__sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
+{
+	val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask);
+	sii8620_write(ctx, addr, val);
+}
+
+static void sii8620_mt_cleanup(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg, *n;
+
+	list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
+		list_del(&msg->node);
+		kfree(msg);
+	}
+	ctx->mt_state = MT_STATE_READY;
+}
+
+static void sii8620_mt_work(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg;
+
+	if (ctx->error)
+		return;
+	if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue))
+		return;
+
+	if (ctx->mt_state == MT_STATE_DONE) {
+		ctx->mt_state = MT_STATE_READY;
+		msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
+				       node);
+		if (msg->recv)
+			msg->recv(ctx, msg);
+		list_del(&msg->node);
+		kfree(msg);
+	}
+
+	if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue))
+		return;
+
+	ctx->mt_state = MT_STATE_BUSY;
+	msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+	if (msg->send)
+		msg->send(ctx, msg);
+}
+
+static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
+				    struct sii8620_mt_msg *msg)
+{
+	switch (msg->reg[0]) {
+	case MHL_WRITE_STAT:
+	case MHL_SET_INT:
+		sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2);
+		sii8620_write(ctx, REG_MSC_COMMAND_START,
+			      BIT_MSC_COMMAND_START_WRITE_STAT);
+		break;
+	case MHL_MSC_MSG:
+		sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3);
+		sii8620_write(ctx, REG_MSC_COMMAND_START,
+			      BIT_MSC_COMMAND_START_MSC_MSG);
+		break;
+	default:
+		dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
+			msg->reg[0]);
+	}
+}
+
+static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+
+	if (!msg)
+		ctx->error = -ENOMEM;
+	else
+		list_add_tail(&msg->node, &ctx->mt_queue);
+
+	return msg;
+}
+
+static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
+{
+	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+	if (!msg)
+		return;
+
+	msg->reg[0] = cmd;
+	msg->reg[1] = arg1;
+	msg->reg[2] = arg2;
+	msg->send = sii8620_mt_msc_cmd_send;
+}
+
+static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val)
+{
+	sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val);
+}
+
+static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask)
+{
+	sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask);
+}
+
+static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data)
+{
+	sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data);
+}
+
+static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
+{
+	sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
+}
+
+static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
+					struct sii8620_mt_msg *msg)
+{
+	u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+			| BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+			| BIT_EDID_CTRL_EDID_MODE_EN;
+
+	if (msg->reg[0] == MHL_READ_XDEVCAP)
+		ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+	sii8620_write_seq(ctx,
+		REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE,
+		REG_EDID_CTRL, ctrl,
+		REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START
+	);
+}
+
+/*
+ * Copy src to dst and leave the changed bits in src: the first XOR
+ * turns *src into (old ^ new), the second XORs that back into *dst,
+ * yielding the new value while src ends up holding the change mask.
+ */
+static void sii8620_update_array(u8 *dst, u8 *src, int count)
+{
+	while (--count >= 0) {
+		*src ^= *dst;
+		*dst++ ^= *src++;
+	}
+}
+
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+	static const char * const sink_str[] = {
+		[SINK_NONE] = "NONE",
+		[SINK_HDMI] = "HDMI",
+		[SINK_DVI] = "DVI"
+	};
+
+	u8 dcap[MHL_DCAP_SIZE];
+	char sink_name[20];
+	struct device *dev = ctx->dev;
+
+	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+	if (ctx->error < 0)
+		return;
+
+	dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
+	dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+		 dcap[MHL_DCAP_MHL_VERSION] / 16,
+		 dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
+		 dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
+		 dcap[MHL_DCAP_DEVICE_ID_L]);
+	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+
+	if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+		return;
+
+	sii8620_fetch_edid(ctx);
+	if (!ctx->edid) {
+		dev_err(ctx->dev, "Cannot fetch EDID\n");
+		sii8620_mhl_disconnected(ctx);
+		return;
+	}
+
+	if (drm_detect_hdmi_monitor(ctx->edid))
+		ctx->sink_type = SINK_HDMI;
+	else
+		ctx->sink_type = SINK_DVI;
+
+	drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name));
+
+	dev_info(dev, "detected sink(type: %s): %s\n",
+		 sink_str[ctx->sink_type], sink_name);
+	sii8620_set_upstream_edid(ctx);
+	sii8620_enable_hpd(ctx);
+}
+
+static void sii8620_mr_xdevcap(struct sii8620 *ctx)
+{
+	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
+			 MHL_XDC_SIZE);
+
+	sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+			      MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+	sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+}
+
+static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
+					struct sii8620_mt_msg *msg)
+{
+	u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+		| BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+		| BIT_EDID_CTRL_EDID_MODE_EN;
+
+	if (msg->reg[0] == MHL_READ_XDEVCAP)
+		ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+	sii8620_write_seq(ctx,
+		REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE
+			| BIT_INTR9_EDID_ERROR,
+		REG_EDID_CTRL, ctrl,
+		REG_EDID_FIFO_ADDR, 0
+	);
+
+	if (msg->reg[0] == MHL_READ_XDEVCAP)
+		sii8620_mr_xdevcap(ctx);
+	else
+		sii8620_mr_devcap(ctx);
+}
+
+static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
+{
+	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+	if (!msg)
+		return;
+
+	msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP;
+	msg->send = sii8620_mt_read_devcap_send;
+	msg->recv = sii8620_mt_read_devcap_recv;
+}
+
+static void sii8620_fetch_edid(struct sii8620 *ctx)
+{
+	u8 lm_ddc, ddc_cmd, int3, cbus;
+	int fetched, i;
+	int edid_len = EDID_LENGTH;
+	u8 *edid;
+
+	sii8620_readb(ctx, REG_CBUS_STATUS);
+	lm_ddc = sii8620_readb(ctx, REG_LM_DDC);
+	ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD);
+
+	sii8620_write_seq(ctx,
+		REG_INTR9_MASK, 0,
+		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+		REG_HDCP2X_POLL_CS, 0x71,
+		REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX,
+		REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED,
+	);
+
+	for (i = 0; i < 256; ++i) {
+		u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS);
+
+		if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG))
+			break;
+		sii8620_write(ctx, REG_DDC_STATUS,
+			      BIT_DDC_STATUS_DDC_FIFO_EMPTY);
+	}
+
+	sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1);
+
+	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	if (!edid) {
+		ctx->error = -ENOMEM;
+		return;
+	}
+
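+/*
+ * Read the EDID in 16-byte DDC bursts; once the base block arrives,
+ * grow the buffer according to its extension count.
+ */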
+#define FETCH_SIZE 16
+	for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) {
+		sii8620_readb(ctx, REG_DDC_STATUS);
+		sii8620_write_seq(ctx,
+			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT,
+			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO,
+			REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY
+		);
+		sii8620_write_seq(ctx,
+			REG_DDC_SEGM, fetched >> 8,
+			REG_DDC_OFFSET, fetched & 0xff,
+			REG_DDC_DIN_CNT1, FETCH_SIZE,
+			REG_DDC_DIN_CNT2, 0,
+			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
+		);
+
+		do {
+			int3 = sii8620_readb(ctx, REG_INTR3);
+			cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+			if (int3 & BIT_DDC_CMD_DONE)
+				break;
+
+			if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+				kfree(edid);
+				edid = NULL;
+				goto end;
+			}
+		} while (1);
+
+		sii8620_readb(ctx, REG_DDC_STATUS);
+		while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+			usleep_range(10, 20);
+
+		sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
+		if (fetched + FETCH_SIZE == EDID_LENGTH) {
+			u8 ext = ((struct edid *)edid)->extensions;
+
+			if (ext) {
+				u8 *new_edid;
+
+				edid_len += ext * EDID_LENGTH;
+				new_edid = krealloc(edid, edid_len, GFP_KERNEL);
+				if (!new_edid) {
+					kfree(edid);
+					ctx->error = -ENOMEM;
+					return;
+				}
+				edid = new_edid;
+			}
+		}
+
+		if (fetched + FETCH_SIZE == edid_len)
+			sii8620_write(ctx, REG_INTR3, int3);
+	}
+
+	sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+
+end:
+	kfree(ctx->edid);
+	ctx->edid = (struct edid *)edid;
+}
+
+static void sii8620_set_upstream_edid(struct sii8620 *ctx)
+{
+	sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N
+			| BIT_DPD_PD_MHL_CLK_N, 0xff);
+
+	sii8620_write_seq_static(ctx,
+		REG_RX_HDMI_CTRL3, 0x00,
+		REG_PKT_FILTER_0, 0xFF,
+		REG_PKT_FILTER_1, 0xFF,
+		REG_ALICE0_BW_I2C, 0x06
+	);
+
+	sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER,
+			BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff);
+
+	sii8620_write_seq_static(ctx,
+		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+			| BIT_EDID_CTRL_EDID_MODE_EN,
+		REG_EDID_FIFO_ADDR, 0,
+	);
+
+	sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid,
+			  (ctx->edid->extensions + 1) * EDID_LENGTH);
+
+	sii8620_write_seq_static(ctx,
+		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID
+			| BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+			| BIT_EDID_CTRL_EDID_MODE_EN,
+		REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE,
+		REG_INTR9_MASK, 0
+	);
+}
+
+static void sii8620_xtal_set_rate(struct sii8620 *ctx)
+{
+	static const struct {
+		unsigned int rate;
+		u8 div;
+		u8 tp1;
+	} rates[] = {
+		{ 19200, 0x04, 0x53 },
+		{ 20000, 0x04, 0x62 },
+		{ 24000, 0x05, 0x75 },
+		{ 30000, 0x06, 0x92 },
+		{ 38400, 0x0c, 0xbc },
+	};
+	unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000;
+	int i;
+
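+	/* pick the first table entry at or above the xtal rate,
+	 * falling back to the fastest supported one */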
+	for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i)
+		if (rate <= rates[i].rate)
+			break;
+
+	if (rate != rates[i].rate)
+		dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n",
+			rate, rates[i].rate);
+
+	sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div);
+	sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1);
+}
+
+static int sii8620_hw_on(struct sii8620 *ctx)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret)
+		return ret;
+	usleep_range(10000, 20000);
+	return clk_prepare_enable(ctx->clk_xtal);
+}
+
+static int sii8620_hw_off(struct sii8620 *ctx)
+{
+	clk_disable_unprepare(ctx->clk_xtal);
+	gpiod_set_value(ctx->gpio_reset, 1);
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii8620_hw_reset(struct sii8620 *ctx)
+{
+	usleep_range(10000, 20000);
+	gpiod_set_value(ctx->gpio_reset, 0);
+	usleep_range(5000, 20000);
+	gpiod_set_value(ctx->gpio_reset, 1);
+	usleep_range(10000, 20000);
+	gpiod_set_value(ctx->gpio_reset, 0);
+	msleep(300);
+}
+
+static void sii8620_cbus_reset(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+			| BIT_PWD_SRST_CBUS_RST_SW_EN,
+		REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
+	);
+}
+
+static void sii8620_set_auto_zone(struct sii8620 *ctx)
+{
+	if (ctx->mode != CM_MHL1) {
+		sii8620_write_seq_static(ctx,
+			REG_TX_ZONE_CTL1, 0x0,
+			REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+				| BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+				| BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+		);
+	} else {
+		sii8620_write_seq_static(ctx,
+			REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE,
+			REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+				| BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+		);
+	}
+}
+
+static void sii8620_stop_video(struct sii8620 *ctx)
+{
+	u8 uninitialized_var(val);
+
+	sii8620_write_seq_static(ctx,
+		REG_TPI_INTR_EN, 0,
+		REG_HDCP2X_INTR0_MASK, 0,
+		REG_TPI_COPP_DATA2, 0,
+		REG_TPI_INTR_ST0, ~0,
+	);
+
+	switch (ctx->sink_type) {
+	case SINK_DVI:
+		val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+			| BIT_TPI_SC_TPI_AV_MUTE;
+		break;
+	case SINK_HDMI:
+		val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+			| BIT_TPI_SC_TPI_AV_MUTE
+			| BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
+		break;
+	default:
+		return;
+	}
+
+	sii8620_write(ctx, REG_TPI_SC, val);
+}
+
+static void sii8620_start_hdmi(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
+			| BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
+		REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
+			| BIT_VID_OVRRD_M1080P_OVRRD,
+		REG_VID_MODE, 0,
+		REG_MHL_TOP_CTL, 0x1,
+		REG_MHLTX_CTL6, 0xa0,
+		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+		REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+	);
+
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+			      MHL_DST_LM_CLK_MODE_NORMAL |
+			      MHL_DST_LM_PATH_ENABLED);
+
+	sii8620_set_auto_zone(ctx);
+
+	sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+
+	sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
+			  ARRAY_SIZE(ctx->avif));
+
+	sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+}
+
+static void sii8620_start_video(struct sii8620 *ctx)
+{
+	if (ctx->mode < CM_MHL3)
+		sii8620_stop_video(ctx);
+
+	switch (ctx->sink_type) {
+	case SINK_HDMI:
+		sii8620_start_hdmi(ctx);
+		break;
+	case SINK_DVI:
+	default:
+		break;
+	}
+}
+
+static void sii8620_disable_hpd(struct sii8620 *ctx)
+{
+	sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
+	sii8620_write_seq_static(ctx,
+		REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN,
+		REG_INTR8_MASK, 0
+	);
+}
+
+static void sii8620_enable_hpd(struct sii8620 *ctx)
+{
+	sii8620_setbits(ctx, REG_TMDS_CSTAT_P3,
+			BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS
+			| BIT_TMDS_CSTAT_P3_CLR_AVI, ~0);
+	sii8620_write_seq_static(ctx,
+		REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN
+			| BIT_HPD_CTRL_HPD_HIGH,
+	);
+}
+
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+	if (ctx->gen2_write_burst)
+		return;
+
+	sii8620_write_seq_static(ctx,
+		REG_MDT_RCV_TIMEOUT, 100,
+		REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
+	);
+	ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+	if (!ctx->gen2_write_burst)
+		return;
+
+	sii8620_write_seq_static(ctx,
+		REG_MDT_XMIT_CTRL, 0,
+		REG_MDT_RCV_CTRL, 0
+	);
+	ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+			| BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+			| BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+			| BIT_MDT_XMIT_SM_ERROR,
+		REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+			| BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+			| BIT_MDT_RFIFO_DATA_RDY
+	);
+	sii8620_enable_gen2_write_burst(ctx);
+}
+
+static void sii8620_mhl_discover(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+			| BIT_DISC_CTRL9_DISC_PULSE_PROCEED,
+		REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K),
+		REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT
+			| BIT_MHL_EST_INT
+			| BIT_NOT_MHL_EST_INT
+			| BIT_CBUS_MHL3_DISCON_INT
+			| BIT_CBUS_MHL12_DISCON_INT
+			| BIT_RGND_READY_INT,
+		REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+			| BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+			| BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+		REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+			| BIT_MHL_DP_CTL0_TX_OE_OVR,
+		REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+		REG_MHL_DP_CTL1, 0xA2,
+		REG_MHL_DP_CTL2, 0x03,
+		REG_MHL_DP_CTL3, 0x35,
+		REG_MHL_DP_CTL5, 0x02,
+		REG_MHL_DP_CTL6, 0x02,
+		REG_MHL_DP_CTL7, 0x03,
+		REG_COC_CTLC, 0xFF,
+		REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+			| BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC,
+		REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE
+			| BIT_COC_CALIBRATION_DONE,
+		REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD
+			| BIT_CBUS_CMD_ABORT,
+		REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE
+			| BIT_CBUS_HPD_CHG
+			| BIT_CBUS_MSC_MR_WRITE_STAT
+			| BIT_CBUS_MSC_MR_MSC_MSG
+			| BIT_CBUS_MSC_MR_WRITE_BURST
+			| BIT_CBUS_MSC_MR_SET_INT
+			| BIT_CBUS_MSC_MT_DONE_NACK
+	);
+}
+
+static void sii8620_peer_specific_init(struct sii8620 *ctx)
+{
+	if (ctx->mode == CM_MHL3)
+		sii8620_write_seq_static(ctx,
+			REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
+			REG_EMSCINTRMASK1,
+				BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR
+		);
+	else
+		sii8620_write_seq_static(ctx,
+			REG_HDCP2X_INTR0_MASK, 0x00,
+			REG_EMSCINTRMASK1, 0x00,
+			REG_HDCP2X_INTR0, 0xFF,
+			REG_INTR1, 0xFF,
+			REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD
+				| BIT_SYS_CTRL1_TX_CTRL_HDMI
+		);
+}
+
+#define SII8620_MHL_VERSION			0x32
+#define SII8620_SCRATCHPAD_SIZE			16
+#define SII8620_INT_STAT_SIZE			0x33
+
+static void sii8620_set_dev_cap(struct sii8620 *ctx)
+{
+	static const u8 devcap[MHL_DCAP_SIZE] = {
+		[MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION,
+		[MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER,
+		[MHL_DCAP_ADOPTER_ID_H] = 0x01,
+		[MHL_DCAP_ADOPTER_ID_L] = 0x41,
+		[MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444
+			| MHL_DCAP_VID_LINK_PPIXEL
+			| MHL_DCAP_VID_LINK_16BPP,
+		[MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH,
+		[MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS,
+		[MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI,
+		[MHL_DCAP_BANDWIDTH] = 0x0f,
+		[MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT
+			| MHL_DCAP_FEATURE_RAP_SUPPORT
+			| MHL_DCAP_FEATURE_SP_SUPPORT,
+		[MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE,
+		[MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE,
+	};
+	static const u8 xdcap[MHL_XDC_SIZE] = {
+		[MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075
+			| MHL_XDC_ECBUS_S_8BIT,
+		[MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150
+			| MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600,
+		[MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST,
+		[MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE,
+	};
+
+	sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap));
+	sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap));
+}
+
+static void sii8620_mhl_init(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+		REG_CBUS_MSC_COMPAT_CTRL,
+			BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN,
+	);
+
+	sii8620_peer_specific_init(ctx);
+
+	sii8620_disable_hpd(ctx);
+
+	sii8620_write_seq_static(ctx,
+		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+		REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+			| BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+		REG_TMDS0_CCTRL1, 0x90,
+		REG_TMDS_CLK_EN, 0x01,
+		REG_TMDS_CH_EN, 0x11,
+		REG_BGR_BIAS, 0x87,
+		REG_ALICE0_ZONE_CTRL, 0xE8,
+		REG_ALICE0_MODE_CTRL, 0x04,
+	);
+	sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0);
+	sii8620_write_seq_static(ctx,
+		REG_TPI_HW_OPT3, 0x76,
+		REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE,
+		REG_TPI_DTD_B2, 79,
+	);
+	sii8620_set_dev_cap(ctx);
+	sii8620_write_seq_static(ctx,
+		REG_MDT_XMIT_TIMEOUT, 100,
+		REG_MDT_XMIT_CTRL, 0x03,
+		REG_MDT_XFIFO_STAT, 0x00,
+		REG_MDT_RCV_TIMEOUT, 100,
+		REG_CBUS_LINK_CTRL_8, 0x1D,
+	);
+
+	sii8620_start_gen2_write_burst(ctx);
+	sii8620_write_seq_static(ctx,
+		REG_BIST_CTRL, 0x00,
+		REG_COC_CTL1, 0x10,
+		REG_COC_CTL2, 0x18,
+		REG_COC_CTLF, 0x07,
+		REG_COC_CTL11, 0xF8,
+		REG_COC_CTL17, 0x61,
+		REG_COC_CTL18, 0x46,
+		REG_COC_CTL19, 0x15,
+		REG_COC_CTL1A, 0x01,
+		REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN,
+		REG_MHL_COC_CTL4, 0x2D,
+		REG_MHL_COC_CTL5, 0xF9,
+		REG_MSC_HEARTBEAT_CTRL, 0x27,
+	);
+	sii8620_disable_gen2_write_burst(ctx);
+
+	/* currently MHL3 is not supported, so we force version to 0 */
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
+			      MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
+			      | MHL_DST_CONN_POW_STAT);
+	sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
+}
+
+static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
+{
+	if (ctx->mode == mode)
+		return;
+
+	ctx->mode = mode;
+
+	switch (mode) {
+	case CM_MHL1:
+		sii8620_write_seq_static(ctx,
+			REG_CBUS_MSC_COMPAT_CTRL, 0x02,
+			REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE,
+			REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+				| BIT_DPD_OSC_EN,
+			REG_COC_INTR_MASK, 0
+		);
+		break;
+	case CM_MHL3:
+		sii8620_write_seq_static(ctx,
+			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+			REG_COC_CTL0, 0x40,
+			REG_MHL_COC_CTL1, 0x07
+		);
+		break;
+	case CM_DISCONNECTED:
+		break;
+	default:
+		dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
+		break;
+	}
+
+	sii8620_set_auto_zone(ctx);
+
+	if (mode != CM_MHL1)
+		return;
+
+	sii8620_write_seq_static(ctx,
+		REG_MHL_DP_CTL0, 0xBC,
+		REG_MHL_DP_CTL1, 0xBB,
+		REG_MHL_DP_CTL3, 0x48,
+		REG_MHL_DP_CTL5, 0x39,
+		REG_MHL_DP_CTL2, 0x2A,
+		REG_MHL_DP_CTL6, 0x2A,
+		REG_MHL_DP_CTL7, 0x08
+	);
+}
+
+static void sii8620_disconnect(struct sii8620 *ctx)
+{
+	sii8620_disable_gen2_write_burst(ctx);
+	sii8620_stop_video(ctx);
+	msleep(50);
+	sii8620_cbus_reset(ctx);
+	sii8620_set_mode(ctx, CM_DISCONNECTED);
+	sii8620_write_seq_static(ctx,
+		REG_COC_CTL0, 0x40,
+		REG_CBUS3_CNVT, 0x84,
+		REG_COC_CTL14, 0x00,
+		REG_COC_CTL0, 0x40,
+		REG_HRXCTRL3, 0x07,
+		REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+			| BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+			| BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+		REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+			| BIT_MHL_DP_CTL0_TX_OE_OVR,
+		REG_MHL_DP_CTL1, 0xBB,
+		REG_MHL_DP_CTL3, 0x48,
+		REG_MHL_DP_CTL5, 0x3F,
+		REG_MHL_DP_CTL2, 0x2F,
+		REG_MHL_DP_CTL6, 0x2A,
+		REG_MHL_DP_CTL7, 0x03
+	);
+	sii8620_disable_hpd(ctx);
+	sii8620_write_seq_static(ctx,
+		REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+		REG_MHL_COC_CTL1, 0x07,
+		REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+		REG_DISC_CTRL8, 0x00,
+		REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+			| BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+		REG_INT_CTRL, 0x00,
+		REG_MSC_HEARTBEAT_CTRL, 0x27,
+		REG_DISC_CTRL1, 0x25,
+		REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT,
+		REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT,
+		REG_MDT_INT_1, 0xff,
+		REG_MDT_INT_1_MASK, 0x00,
+		REG_MDT_INT_0, 0xff,
+		REG_MDT_INT_0_MASK, 0x00,
+		REG_COC_INTR, 0xff,
+		REG_COC_INTR_MASK, 0x00,
+		REG_TRXINTH, 0xff,
+		REG_TRXINTMH, 0x00,
+		REG_CBUS_INT_0, 0xff,
+		REG_CBUS_INT_0_MASK, 0x00,
+		REG_CBUS_INT_1, 0xff,
+		REG_CBUS_INT_1_MASK, 0x00,
+		REG_EMSCINTR, 0xff,
+		REG_EMSCINTRMASK, 0x00,
+		REG_EMSCINTR1, 0xff,
+		REG_EMSCINTRMASK1, 0x00,
+		REG_INTR8, 0xff,
+		REG_INTR8_MASK, 0x00,
+		REG_TPI_INTR_ST0, 0xff,
+		REG_TPI_INTR_EN, 0x00,
+		REG_HDCP2X_INTR0, 0xff,
+		REG_HDCP2X_INTR0_MASK, 0x00,
+		REG_INTR9, 0xff,
+		REG_INTR9_MASK, 0x00,
+		REG_INTR3, 0xff,
+		REG_INTR3_MASK, 0x00,
+		REG_INTR5, 0xff,
+		REG_INTR5_MASK, 0x00,
+		REG_INTR2, 0xff,
+		REG_INTR2_MASK, 0x00,
+	);
+	memset(ctx->stat, 0, sizeof(ctx->stat));
+	memset(ctx->xstat, 0, sizeof(ctx->xstat));
+	memset(ctx->devcap, 0, sizeof(ctx->devcap));
+	memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+	ctx->cbus_status = 0;
+	ctx->sink_type = SINK_NONE;
+	kfree(ctx->edid);
+	ctx->edid = NULL;
+	sii8620_mt_cleanup(ctx);
+}
+
+static void sii8620_mhl_disconnected(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+		REG_CBUS_MSC_COMPAT_CTRL,
+			BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN
+	);
+	sii8620_disconnect(ctx);
+}
+
+static void sii8620_irq_disc(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0);
+
+	if (stat & VAL_CBUS_MHL_DISCON)
+		sii8620_mhl_disconnected(ctx);
+
+	if (stat & BIT_RGND_READY_INT) {
+		u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2);
+
+		if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) {
+			sii8620_mhl_discover(ctx);
+		} else {
+			sii8620_write_seq_static(ctx,
+				REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+					| BIT_DISC_CTRL9_NOMHL_EST
+					| BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+				REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT
+					| BIT_CBUS_MHL3_DISCON_INT
+					| BIT_CBUS_MHL12_DISCON_INT
+					| BIT_NOT_MHL_EST_INT
+			);
+		}
+	}
+	if (stat & BIT_MHL_EST_INT)
+		sii8620_mhl_init(ctx);
+
+	sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
+}
+
+static void sii8620_irq_g2wb(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
+
+	if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
+		dev_dbg(ctx->dev, "HAWB idle\n");
+
+	sii8620_write(ctx, REG_MDT_INT_0, stat);
+}
+
+static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+{
+	if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
+		sii8620_set_mode(ctx, CM_MHL1);
+		sii8620_peer_specific_init(ctx);
+		sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+			       | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
+	}
+}
+
+static void sii8620_status_changed_path(struct sii8620 *ctx)
+{
+	if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
+		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+				      MHL_DST_LM_CLK_MODE_NORMAL
+				      | MHL_DST_LM_PATH_ENABLED);
+		sii8620_mt_read_devcap(ctx, false);
+	} else {
+		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+				      MHL_DST_LM_CLK_MODE_NORMAL);
+	}
+}
+
+static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
+{
+	u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE];
+
+	sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE);
+	sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE);
+
+	sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
+	sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
+
+	if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+		sii8620_status_changed_dcap(ctx);
+
+	if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+		sii8620_status_changed_path(ctx);
+}
+
+static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
+{
+	u8 ints[MHL_INT_SIZE];
+
+	sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+	sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+}
+
+static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
+{
+	struct device *dev = ctx->dev;
+
+	if (list_empty(&ctx->mt_queue)) {
+		dev_err(dev, "unexpected MSC MT response\n");
+		return NULL;
+	}
+
+	return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+}
+
+static void sii8620_msc_mt_done(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+
+	if (!msg)
+		return;
+
+	msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0);
+	ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+	u8 buf[2];
+
+	if (!msg)
+		return;
+
+	sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
+
+	switch (buf[0]) {
+	case MHL_MSC_MSG_RAPK:
+		msg->ret = buf[1];
+		ctx->mt_state = MT_STATE_DONE;
+		break;
+	default:
+		dev_err(ctx->dev, "%s message type %d,%d not supported",
+			__func__, buf[0], buf[1]);
+	}
+}
+
+static void sii8620_irq_msc(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0);
+
+	if (stat & ~BIT_CBUS_HPD_CHG)
+		sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG);
+
+	if (stat & BIT_CBUS_HPD_CHG) {
+		u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+		if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) {
+			sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG);
+		} else {
+			stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+			cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+		}
+		ctx->cbus_status = cbus_stat;
+	}
+
+	if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
+		sii8620_msc_mr_write_stat(ctx);
+
+	if (stat & BIT_CBUS_MSC_MR_SET_INT)
+		sii8620_msc_mr_set_int(ctx);
+
+	if (stat & BIT_CBUS_MSC_MT_DONE)
+		sii8620_msc_mt_done(ctx);
+
+	if (stat & BIT_CBUS_MSC_MR_MSC_MSG)
+		sii8620_msc_mr_msc_msg(ctx);
+}
+
+static void sii8620_irq_coc(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_COC_INTR);
+
+	sii8620_write(ctx, REG_COC_INTR, stat);
+}
+
+static void sii8620_irq_merr(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1);
+
+	sii8620_write(ctx, REG_CBUS_INT_1, stat);
+}
+
+static void sii8620_irq_edid(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_INTR9);
+
+	sii8620_write(ctx, REG_INTR9, stat);
+
+	if (stat & BIT_INTR9_DEVCAP_DONE)
+		ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_scdt_high(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
+		REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
+	);
+}
+
+static void sii8620_scdt_low(struct sii8620 *ctx)
+{
+	sii8620_write(ctx, REG_TMDS_CSTAT_P3,
+		      BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
+		      BIT_TMDS_CSTAT_P3_CLR_AVI);
+
+	sii8620_stop_video(ctx);
+
+	sii8620_write(ctx, REG_INTR8_MASK, 0);
+}
+
+static void sii8620_irq_scdt(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_INTR5);
+
+	if (stat & BIT_INTR_SCDT_CHANGE) {
+		u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
+
+		if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+			sii8620_scdt_high(ctx);
+		else
+			sii8620_scdt_low(ctx);
+	}
+
+	sii8620_write(ctx, REG_INTR5, stat);
+}
+
+static void sii8620_new_vsi(struct sii8620 *ctx)
+{
+	u8 vsif[11];
+
+	sii8620_write(ctx, REG_RX_HDMI_CTRL2,
+		      VAL_RX_HDMI_CTRL2_DEFVAL |
+		      BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
+	sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
+			 ARRAY_SIZE(vsif));
+}
+
+static void sii8620_new_avi(struct sii8620 *ctx)
+{
+	sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
+	sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
+			 ARRAY_SIZE(ctx->avif));
+}
+
+static void sii8620_irq_infr(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_INTR8)
+		& (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
+
+	sii8620_write(ctx, REG_INTR8, stat);
+
+	if (stat & BIT_CEA_NEW_VSI)
+		sii8620_new_vsi(ctx);
+
+	if (stat & BIT_CEA_NEW_AVI)
+		sii8620_new_avi(ctx);
+
+	if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
+		sii8620_start_video(ctx);
+}
+
+/* endian agnostic, non-volatile version of test_bit */
+static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
+{
+	return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE));
+}
+
+static irqreturn_t sii8620_irq_thread(int irq, void *data)
+{
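+	/*
+	 * Bit numbers in this table index the whole LEN_FAST_INTR_STAT
+	 * byte block, hence the sii8620_test_bit() helper above.
+	 */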
+	static const struct {
+		int bit;
+		void (*handler)(struct sii8620 *ctx);
+	} irq_vec[] = {
+		{ BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
+		{ BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
+		{ BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+		{ BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
+		{ BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+		{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+		{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
+		{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
+	};
+	struct sii8620 *ctx = data;
+	u8 stats[LEN_FAST_INTR_STAT];
+	int i, ret;
+
+	mutex_lock(&ctx->lock);
+
+	sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats));
+	for (i = 0; i < ARRAY_SIZE(irq_vec); ++i)
+		if (sii8620_test_bit(irq_vec[i].bit, stats))
+			irq_vec[i].handler(ctx);
+
+	sii8620_mt_work(ctx);
+
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret);
+		sii8620_mhl_disconnected(ctx);
+	}
+	mutex_unlock(&ctx->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void sii8620_cable_in(struct sii8620 *ctx)
+{
+	struct device *dev = ctx->dev;
+	u8 ver[5];
+	int ret;
+
+	ret = sii8620_hw_on(ctx);
+	if (ret) {
+		dev_err(dev, "Error powering on, %d.\n", ret);
+		return;
+	}
+	sii8620_hw_reset(ctx);
+
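+	/* REG_VND_IDL..REG_DEV_REV: vendor id, device id and revision */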
+	sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+		return;
+	}
+
+	dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0],
+		 ver[3], ver[2], ver[4]);
+
+	sii8620_write(ctx, REG_DPD,
+		      BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN);
+
+	sii8620_xtal_set_rate(ctx);
+	sii8620_disconnect(ctx);
+
+	sii8620_write_seq_static(ctx,
+		REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG
+			| VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734,
+		REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM,
+		REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN,
+	);
+
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+		return;
+	}
+
+	enable_irq(to_i2c_client(ctx->dev)->irq);
+}
+
+static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct sii8620, bridge);
+}
+
+static bool sii8620_mode_fixup(struct drm_bridge *bridge,
+			       const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct sii8620 *ctx = bridge_to_sii8620(bridge);
+	bool ret = false;
+	int max_clock = 74250;
+
+	mutex_lock(&ctx->lock);
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		goto out;
+
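+	/* sinks without packed pixel support are limited to 74.25 MHz */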
+	if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
+		max_clock = 300000;
+
+	ret = mode->clock <= max_clock;
+
+out:
+	mutex_unlock(&ctx->lock);
+
+	return ret;
+}
+
+static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+	.mode_fixup = sii8620_mode_fixup,
+};
+
+static int sii8620_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct sii8620 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->dev = dev;
+	mutex_init(&ctx->lock);
+	INIT_LIST_HEAD(&ctx->mt_queue);
+
+	ctx->clk_xtal = devm_clk_get(dev, "xtal");
+	if (IS_ERR(ctx->clk_xtal)) {
+		dev_err(dev, "failed to get xtal clock from DT\n");
+		return PTR_ERR(ctx->clk_xtal);
+	}
+
+	if (!client->irq) {
+		dev_err(dev, "no irq provided\n");
+		return -EINVAL;
+	}
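+	/* keep the IRQ disabled until sii8620_cable_in() powers the chip */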
+	irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(dev, client->irq, NULL,
+					sii8620_irq_thread,
+					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					"sii8620", ctx);
+	if (ret)
+		return ret;
+
+	ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(ctx->gpio_reset)) {
+		dev_err(dev, "failed to get reset gpio from DT\n");
+		return PTR_ERR(ctx->gpio_reset);
+	}
+
+	ctx->supplies[0].supply = "cvcc10";
+	ctx->supplies[1].supply = "iovcc18";
+	ret = devm_regulator_bulk_get(dev, 2, ctx->supplies);
+	if (ret)
+		return ret;
+
+	i2c_set_clientdata(client, ctx);
+
+	ctx->bridge.funcs = &sii8620_bridge_funcs;
+	ctx->bridge.of_node = dev->of_node;
+	drm_bridge_add(&ctx->bridge);
+
+	sii8620_cable_in(ctx);
+
+	return 0;
+}
+
+static int sii8620_remove(struct i2c_client *client)
+{
+	struct sii8620 *ctx = i2c_get_clientdata(client);
+
+	disable_irq(client->irq);
+	drm_bridge_remove(&ctx->bridge);
+	sii8620_hw_off(ctx);
+
+	return 0;
+}
+
+static const struct of_device_id sii8620_dt_match[] = {
+	{ .compatible = "sil,sii8620" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sii8620_dt_match);
+
+static const struct i2c_device_id sii8620_id[] = {
+	{ "sii8620", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, sii8620_id);
+
+static struct i2c_driver sii8620_driver = {
+	.driver = {
+		.name	= "sii8620",
+		.of_match_table = of_match_ptr(sii8620_dt_match),
+	},
+	.probe		= sii8620_probe,
+	.remove		= sii8620_remove,
+	.id_table = sii8620_id,
+};
+
+module_i2c_driver(sii8620_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h
new file mode 100644
index 0000000..6ff616a4
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.h
@@ -0,0 +1,1517 @@
+/*
+ * Registers of Silicon Image SiI8620 Mobile HD Transmitter
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SIL_SII8620_H__
+#define __SIL_SII8620_H__
+
+/* Vendor ID Low byte, default value: 0x01 */
+#define REG_VND_IDL				0x0000
+
+/* Vendor ID High byte, default value: 0x00 */
+#define REG_VND_IDH				0x0001
+
+/* Device ID Low byte, default value: 0x60 */
+#define REG_DEV_IDL				0x0002
+
+/* Device ID High byte, default value: 0x86 */
+#define REG_DEV_IDH				0x0003
+
+/* Device Revision, default value: 0x10 */
+#define REG_DEV_REV				0x0004
+
+/* OTP DBYTE510, default value: 0x00 */
+#define REG_OTP_DBYTE510			0x0006
+
+/* System Control #1, default value: 0x00 */
+#define REG_SYS_CTRL1				0x0008
+#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET		BIT(7)
+#define BIT_SYS_CTRL1_VSYNCPIN			BIT(6)
+#define BIT_SYS_CTRL1_OTPADROPOVR_SET		BIT(5)
+#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD		BIT(4)
+#define BIT_SYS_CTRL1_OTP2XVOVR_EN		BIT(3)
+#define BIT_SYS_CTRL1_OTP2XAOVR_EN		BIT(2)
+#define BIT_SYS_CTRL1_TX_CTRL_HDMI		BIT(1)
+#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET		BIT(0)
+
+/* System Control DPD, default value: 0x90 */
+#define REG_DPD					0x000b
+#define BIT_DPD_PWRON_PLL			BIT(7)
+#define BIT_DPD_PDNTX12				BIT(6)
+#define BIT_DPD_PDNRX12				BIT(5)
+#define BIT_DPD_OSC_EN				BIT(4)
+#define BIT_DPD_PWRON_HSIC			BIT(3)
+#define BIT_DPD_PDIDCK_N			BIT(2)
+#define BIT_DPD_PD_MHL_CLK_N			BIT(1)
+
+/* Dual link Control, default value: 0x00 */
+#define REG_DCTL				0x000d
+#define BIT_DCTL_TDM_LCLK_PHASE			BIT(7)
+#define BIT_DCTL_HSIC_CLK_PHASE			BIT(6)
+#define BIT_DCTL_CTS_TCK_PHASE			BIT(5)
+#define BIT_DCTL_EXT_DDC_SEL			BIT(4)
+#define BIT_DCTL_TRANSCODE			BIT(3)
+#define BIT_DCTL_HSIC_RX_STROBE_PHASE		BIT(2)
+#define BIT_DCTL_HSIC_TX_BIST_START_SEL		BIT(1)
+#define BIT_DCTL_TCLKNX_PHASE			BIT(0)
+
+/* PWD Software Reset, default value: 0x20 */
+#define REG_PWD_SRST				0x000e
+#define BIT_PWD_SRST_COC_DOC_RST		BIT(7)
+#define BIT_PWD_SRST_CBUS_RST_SW		BIT(6)
+#define BIT_PWD_SRST_CBUS_RST_SW_EN		BIT(5)
+#define BIT_PWD_SRST_MHLFIFO_RST		BIT(4)
+#define BIT_PWD_SRST_CBUS_RST			BIT(3)
+#define BIT_PWD_SRST_SW_RST_AUTO		BIT(2)
+#define BIT_PWD_SRST_HDCP2X_SW_RST		BIT(1)
+#define BIT_PWD_SRST_SW_RST			BIT(0)
+
+/* AKSV_1, default value: 0x00 */
+#define REG_AKSV_1				0x001d
+
+/* Video H Resolution #1, default value: 0x00 */
+#define REG_H_RESL				0x003a
+
+/* Video Mode, default value: 0x00 */
+#define REG_VID_MODE				0x004a
+#define BIT_VID_MODE_M1080P			BIT(6)
+
+/* Video Input Mode, default value: 0xc0 */
+#define REG_VID_OVRRD				0x0051
+#define BIT_VID_OVRRD_PP_AUTO_DISABLE		BIT(7)
+#define BIT_VID_OVRRD_M1080P_OVRRD		BIT(6)
+#define BIT_VID_OVRRD_MINIVSYNC_ON		BIT(5)
+#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK	BIT(4)
+#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN	BIT(3)
+#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD		BIT(2)
+#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD	BIT(0)
+
+/* I2C Address reassignment, default value: 0x00 */
+#define REG_PAGE_MHLSPEC_ADDR			0x0057
+#define REG_PAGE7_ADDR				0x0058
+#define REG_PAGE8_ADDR				0x005c
+
+/* Fast Interrupt Status, default value: 0x00 */
+#define REG_FAST_INTR_STAT			0x005f
+#define LEN_FAST_INTR_STAT			7
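+/*
+ * The BIT_FAST_INTR_STAT_* values below are bit positions within the
+ * LEN_FAST_INTR_STAT byte block, not masks within a single register.
+ */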
+#define BIT_FAST_INTR_STAT_TIMR			8
+#define BIT_FAST_INTR_STAT_INT2			9
+#define BIT_FAST_INTR_STAT_DDC			10
+#define BIT_FAST_INTR_STAT_SCDT			11
+#define BIT_FAST_INTR_STAT_INFR			13
+#define BIT_FAST_INTR_STAT_EDID			14
+#define BIT_FAST_INTR_STAT_HDCP			15
+#define BIT_FAST_INTR_STAT_MSC			16
+#define BIT_FAST_INTR_STAT_MERR			17
+#define BIT_FAST_INTR_STAT_G2WB			18
+#define BIT_FAST_INTR_STAT_G2WB_ERR		19
+#define BIT_FAST_INTR_STAT_DISC			28
+#define BIT_FAST_INTR_STAT_BLOCK		30
+#define BIT_FAST_INTR_STAT_LTRN			31
+#define BIT_FAST_INTR_STAT_HDCP2		32
+#define BIT_FAST_INTR_STAT_TDM			42
+#define BIT_FAST_INTR_STAT_COC			51
+
+/* GPIO Control, default value: 0x15 */
+#define REG_GPIO_CTRL1				0x006e
+#define BIT_CTRL1_GPIO_I_8			BIT(5)
+#define BIT_CTRL1_GPIO_OEN_8			BIT(4)
+#define BIT_CTRL1_GPIO_I_7			BIT(3)
+#define BIT_CTRL1_GPIO_OEN_7			BIT(2)
+#define BIT_CTRL1_GPIO_I_6			BIT(1)
+#define BIT_CTRL1_GPIO_OEN_6			BIT(0)
+
+/* Interrupt Control, default value: 0x06 */
+#define REG_INT_CTRL				0x006f
+#define BIT_INT_CTRL_SOFTWARE_WP		BIT(7)
+#define BIT_INT_CTRL_INTR_OD			BIT(2)
+#define BIT_INT_CTRL_INTR_POLARITY		BIT(1)
+
+/* Interrupt State, default value: 0x00 */
+#define REG_INTR_STATE				0x0070
+#define BIT_INTR_STATE_INTR_STATE		BIT(0)
+
+/* Interrupt Source #1, default value: 0x00 */
+#define REG_INTR1				0x0071
+
+/* Interrupt Source #2, default value: 0x00 */
+#define REG_INTR2				0x0072
+
+/* Interrupt Source #3, default value: 0x01 */
+#define REG_INTR3				0x0073
+#define BIT_DDC_CMD_DONE			BIT(3)
+
+/* Interrupt Source #5, default value: 0x00 */
+#define REG_INTR5				0x0074
+
+/* Interrupt #1 Mask, default value: 0x00 */
+#define REG_INTR1_MASK				0x0075
+
+/* Interrupt #2 Mask, default value: 0x00 */
+#define REG_INTR2_MASK				0x0076
+
+/* Interrupt #3 Mask, default value: 0x00 */
+#define REG_INTR3_MASK				0x0077
+
+/* Interrupt #5 Mask, default value: 0x00 */
+#define REG_INTR5_MASK				0x0078
+#define BIT_INTR_SCDT_CHANGE			BIT(0)
+
+/* Hot Plug Connection Control, default value: 0x45 */
+#define REG_HPD_CTRL				0x0079
+#define BIT_HPD_CTRL_HPD_DS_SIGNAL		BIT(7)
+#define BIT_HPD_CTRL_HPD_OUT_OD_EN		BIT(6)
+#define BIT_HPD_CTRL_HPD_HIGH			BIT(5)
+#define BIT_HPD_CTRL_HPD_OUT_OVR_EN		BIT(4)
+#define BIT_HPD_CTRL_GPIO_I_1			BIT(3)
+#define BIT_HPD_CTRL_GPIO_OEN_1			BIT(2)
+#define BIT_HPD_CTRL_GPIO_I_0			BIT(1)
+#define BIT_HPD_CTRL_GPIO_OEN_0			BIT(0)
+
+/* GPIO Control, default value: 0x55 */
+#define REG_GPIO_CTRL				0x007a
+#define BIT_CTRL_GPIO_I_5			BIT(7)
+#define BIT_CTRL_GPIO_OEN_5			BIT(6)
+#define BIT_CTRL_GPIO_I_4			BIT(5)
+#define BIT_CTRL_GPIO_OEN_4			BIT(4)
+#define BIT_CTRL_GPIO_I_3			BIT(3)
+#define BIT_CTRL_GPIO_OEN_3			BIT(2)
+#define BIT_CTRL_GPIO_I_2			BIT(1)
+#define BIT_CTRL_GPIO_OEN_2			BIT(0)
+
+/* Interrupt Source 7, default value: 0x00 */
+#define REG_INTR7				0x007b
+
+/* Interrupt Source 8, default value: 0x00 */
+#define REG_INTR8				0x007c
+
+/* Interrupt #7 Mask, default value: 0x00 */
+#define REG_INTR7_MASK				0x007d
+
+/* Interrupt #8 Mask, default value: 0x00 */
+#define REG_INTR8_MASK				0x007e
+#define BIT_CEA_NEW_VSI				BIT(2)
+#define BIT_CEA_NEW_AVI				BIT(1)
+
+/* IEEE, default value: 0x10 */
+#define REG_TMDS_CCTRL				0x0080
+#define BIT_TMDS_CCTRL_TMDS_OE			BIT(4)
+
+/* TMDS Control #4, default value: 0x02 */
+#define REG_TMDS_CTRL4				0x0085
+#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL		BIT(1)
+#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT		BIT(0)
+
+/* BIST CNTL, default value: 0x00 */
+#define REG_BIST_CTRL				0x00bb
+#define BIT_RXBIST_VGB_EN			BIT(7)
+#define BIT_TXBIST_VGB_EN			BIT(6)
+#define BIT_BIST_START_SEL			BIT(5)
+#define BIT_BIST_START_BIT			BIT(4)
+#define BIT_BIST_ALWAYS_ON			BIT(3)
+#define BIT_BIST_TRANS				BIT(2)
+#define BIT_BIST_RESET				BIT(1)
+#define BIT_BIST_EN				BIT(0)
+
+/* BIST TEST SEL, default value: 0x00 */
+#define REG_BIST_TEST_SEL			0x00bd
+#define MSK_BIST_TEST_SEL_BIST_PATT_SEL		0x0f
+
+/* BIST VIDEO_MODE, default value: 0x00 */
+#define REG_BIST_VIDEO_MODE			0x00be
+#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0	0x0f
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_DURATION_0			0x00bf
+
+/* BIST DURATION1, default value: 0x00 */
+#define REG_BIST_DURATION_1			0x00c0
+
+/* BIST DURATION2, default value: 0x00 */
+#define REG_BIST_DURATION_2			0x00c1
+
+/* BIST 8BIT_PATTERN, default value: 0x00 */
+#define REG_BIST_8BIT_PATTERN			0x00c2
+
+/* LM DDC, default value: 0x80 */
+#define REG_LM_DDC				0x00c7
+#define BIT_LM_DDC_SW_TPI_EN_DISABLED		BIT(7)
+
+#define BIT_LM_DDC_VIDEO_MUTE_EN		BIT(5)
+#define BIT_LM_DDC_DDC_TPI_SW			BIT(2)
+#define BIT_LM_DDC_DDC_GRANT			BIT(1)
+#define BIT_LM_DDC_DDC_GPU_REQUEST		BIT(0)
+
+/* DDC I2C Manual, default value: 0x03 */
+#define REG_DDC_MANUAL				0x00ec
+#define BIT_DDC_MANUAL_MAN_DDC			BIT(7)
+#define BIT_DDC_MANUAL_VP_SEL			BIT(6)
+#define BIT_DDC_MANUAL_DSDA			BIT(5)
+#define BIT_DDC_MANUAL_DSCL			BIT(4)
+#define BIT_DDC_MANUAL_GCP_HW_CTL_EN		BIT(3)
+#define BIT_DDC_MANUAL_DDCM_ABORT_WP		BIT(2)
+#define BIT_DDC_MANUAL_IO_DSDA			BIT(1)
+#define BIT_DDC_MANUAL_IO_DSCL			BIT(0)
+
+/* DDC I2C Target Slave Address, default value: 0x00 */
+#define REG_DDC_ADDR				0x00ed
+#define MSK_DDC_ADDR_DDC_ADDR			0xfe
+
+/* DDC I2C Target Segment Address, default value: 0x00 */
+#define REG_DDC_SEGM				0x00ee
+
+/* DDC I2C Target Offset Address, default value: 0x00 */
+#define REG_DDC_OFFSET				0x00ef
+
+/* DDC I2C Data In count #1, default value: 0x00 */
+#define REG_DDC_DIN_CNT1			0x00f0
+
+/* DDC I2C Data In count #2, default value: 0x00 */
+#define REG_DDC_DIN_CNT2			0x00f1
+#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8	0x03
+
+/* DDC I2C Status, default value: 0x04 */
+#define REG_DDC_STATUS				0x00f2
+#define BIT_DDC_STATUS_DDC_BUS_LOW		BIT(6)
+#define BIT_DDC_STATUS_DDC_NO_ACK		BIT(5)
+#define BIT_DDC_STATUS_DDC_I2C_IN_PROG		BIT(4)
+#define BIT_DDC_STATUS_DDC_FIFO_FULL		BIT(3)
+#define BIT_DDC_STATUS_DDC_FIFO_EMPTY		BIT(2)
+#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_SUE	BIT(1)
+#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE	BIT(0)
+
+/* DDC I2C Command, default value: 0x70 */
+#define REG_DDC_CMD				0x00f3
+#define BIT_DDC_CMD_HDCP_DDC_EN			BIT(6)
+#define BIT_DDC_CMD_SDA_DEL_EN			BIT(5)
+#define BIT_DDC_CMD_DDC_FLT_EN			BIT(4)
+
+#define MSK_DDC_CMD_DDC_CMD			0x0f
+#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK		0x04
+#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO		0x09
+#define VAL_DDC_CMD_DDC_CMD_ABORT		0x0f
+
+/* DDC I2C FIFO Data In/Out, default value: 0x00 */
+#define REG_DDC_DATA				0x00f4
+
+/* DDC I2C Data Out Counter, default value: 0x00 */
+#define REG_DDC_DOUT_CNT			0x00f5
+#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8	BIT(7)
+#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT	0x1f
+
+/* DDC I2C Delay Count, default value: 0x14 */
+#define REG_DDC_DELAY_CNT			0x00f6
+
+/* Test Control, default value: 0x80 */
+#define REG_TEST_TXCTRL				0x00f7
+#define BIT_TEST_TXCTRL_RCLK_REF_SEL		BIT(7)
+#define BIT_TEST_TXCTRL_PCLK_REF_SEL		BIT(6)
+#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK		0x3c
+#define BIT_TEST_TXCTRL_HDMI_MODE		BIT(1)
+#define BIT_TEST_TXCTRL_TST_PLLCK		BIT(0)
+
+/* CBUS Address, default value: 0x00 */
+#define REG_PAGE_CBUS_ADDR			0x00f8
+
+/* I2C Device Address re-assignment */
+#define REG_PAGE1_ADDR				0x00fc
+#define REG_PAGE2_ADDR				0x00fd
+#define REG_PAGE3_ADDR				0x00fe
+#define REG_HW_TPI_ADDR				0x00ff
+
+/* USBT CTRL0, default value: 0x00 */
+#define REG_UTSRST				0x0100
+#define BIT_UTSRST_FC_SRST			BIT(5)
+#define BIT_UTSRST_KEEPER_SRST			BIT(4)
+#define BIT_UTSRST_HTX_SRST			BIT(3)
+#define BIT_UTSRST_TRX_SRST			BIT(2)
+#define BIT_UTSRST_TTX_SRST			BIT(1)
+#define BIT_UTSRST_HRX_SRST			BIT(0)
+
+/* HSIC RX Control3, default value: 0x07 */
+#define REG_HRXCTRL3				0x0104
+#define MSK_HRXCTRL3_HRX_AFFCTRL		0xf0
+#define BIT_HRXCTRL3_HRX_OUT_EN			BIT(2)
+#define BIT_HRXCTRL3_STATUS_EN			BIT(1)
+#define BIT_HRXCTRL3_HRX_STAY_RESET		BIT(0)
+
+/* HSIC RX INT Registers */
+#define REG_HRXINTL				0x0111
+#define REG_HRXINTH				0x0112
+
+/* TDM TX NUMBITS, default value: 0x0c */
+#define REG_TTXNUMB				0x0116
+#define MSK_TTXNUMB_TTX_AFFCTRL_3_0		0xf0
+#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT	BIT(3)
+#define MSK_TTXNUMB_TTX_NUMBPS_2_0		0x07
+
+/* TDM TX NUMSPISYM, default value: 0x04 */
+#define REG_TTXSPINUMS				0x0117
+
+/* TDM TX NUMHSICSYM, default value: 0x14 */
+#define REG_TTXHSICNUMS				0x0118
+
+/* TDM TX NUMTOTSYM, default value: 0x18 */
+#define REG_TTXTOTNUMS				0x0119
+
+/* TDM TX INT Low, default value: 0x00 */
+#define REG_TTXINTL				0x0136
+#define BIT_TTXINTL_TTX_INTR7			BIT(7)
+#define BIT_TTXINTL_TTX_INTR6			BIT(6)
+#define BIT_TTXINTL_TTX_INTR5			BIT(5)
+#define BIT_TTXINTL_TTX_INTR4			BIT(4)
+#define BIT_TTXINTL_TTX_INTR3			BIT(3)
+#define BIT_TTXINTL_TTX_INTR2			BIT(2)
+#define BIT_TTXINTL_TTX_INTR1			BIT(1)
+#define BIT_TTXINTL_TTX_INTR0			BIT(0)
+
+/* TDM TX INT High, default value: 0x00 */
+#define REG_TTXINTH				0x0137
+#define BIT_TTXINTH_TTX_INTR15			BIT(7)
+#define BIT_TTXINTH_TTX_INTR14			BIT(6)
+#define BIT_TTXINTH_TTX_INTR13			BIT(5)
+#define BIT_TTXINTH_TTX_INTR12			BIT(4)
+#define BIT_TTXINTH_TTX_INTR11			BIT(3)
+#define BIT_TTXINTH_TTX_INTR10			BIT(2)
+#define BIT_TTXINTH_TTX_INTR9			BIT(1)
+#define BIT_TTXINTH_TTX_INTR8			BIT(0)
+
+/* TDM RX Control, default value: 0x1c */
+#define REG_TRXCTRL				0x013b
+#define BIT_TRXCTRL_TRX_CLR_WVALLOW		BIT(4)
+#define BIT_TRXCTRL_TRX_FROM_SE_COC		BIT(3)
+#define MSK_TRXCTRL_TRX_NUMBPS_2_0		0x07
+
+/* TDM RX NUMSPISYM, default value: 0x04 */
+#define REG_TRXSPINUMS				0x013c
+
+/* TDM RX NUMHSICSYM, default value: 0x14 */
+#define REG_TRXHSICNUMS				0x013d
+
+/* TDM RX NUMTOTSYM, default value: 0x18 */
+#define REG_TRXTOTNUMS				0x013e
+
+/* TDM RX Status 2nd, default value: 0x00 */
+#define REG_TRXSTA2				0x015c
+
+/* TDM RX INT Low, default value: 0x00 */
+#define REG_TRXINTL				0x0163
+
+/* TDM RX INT High, default value: 0x00 */
+#define REG_TRXINTH				0x0164
+
+/* TDM RX INTMASK High, default value: 0x00 */
+#define REG_TRXINTMH				0x0166
+
+/* HSIC TX CRTL, default value: 0x00 */
+#define REG_HTXCTRL				0x0169
+#define BIT_HTXCTRL_HTX_ALLSBE_SOP		BIT(4)
+#define BIT_HTXCTRL_HTX_RGDINV_USB		BIT(3)
+#define BIT_HTXCTRL_HTX_RSPTDM_BUSY		BIT(2)
+#define BIT_HTXCTRL_HTX_DRVCONN1		BIT(1)
+#define BIT_HTXCTRL_HTX_DRVRST1			BIT(0)
+
+/* HSIC TX INT Low, default value: 0x00 */
+#define REG_HTXINTL				0x017d
+
+/* HSIC TX INT High, default value: 0x00 */
+#define REG_HTXINTH				0x017e
+
+/* HSIC Keeper, default value: 0x00 */
+#define REG_KEEPER				0x0181
+#define MSK_KEEPER_KEEPER_MODE_1_0		0x03
+
+/* HSIC Flow Control General, default value: 0x02 */
+#define REG_FCGC				0x0183
+#define BIT_FCGC_HSIC_FC_HOSTMODE		BIT(1)
+#define BIT_FCGC_HSIC_FC_ENABLE			BIT(0)
+
+/* HSIC Flow Control CTR13, default value: 0xfc */
+#define REG_FCCTR13				0x0191
+
+/* HSIC Flow Control CTR14, default value: 0xff */
+#define REG_FCCTR14				0x0192
+
+/* HSIC Flow Control CTR15, default value: 0xff */
+#define REG_FCCTR15				0x0193
+
+/* HSIC Flow Control CTR50, default value: 0x03 */
+#define REG_FCCTR50				0x01b6
+
+/* HSIC Flow Control INTR0, default value: 0x00 */
+#define REG_FCINTR0				0x01ec
+#define REG_FCINTR1				0x01ed
+#define REG_FCINTR2				0x01ee
+#define REG_FCINTR3				0x01ef
+#define REG_FCINTR4				0x01f0
+#define REG_FCINTR5				0x01f1
+#define REG_FCINTR6				0x01f2
+#define REG_FCINTR7				0x01f3
+
+/* TDM Low Latency, default value: 0x20 */
+#define REG_TDMLLCTL				0x01fc
+#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL		0xc0
+#define MSK_TDMLLCTL_TRX_LL_SEL_MODE		0x30
+#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL		0x0c
+#define BIT_TDMLLCTL_TTX_LL_TIE_LOW		BIT(1)
+#define BIT_TDMLLCTL_TTX_LL_SEL_MODE		BIT(0)
+
+/* TMDS 0 Clock Control, default value: 0x10 */
+#define REG_TMDS0_CCTRL1			0x0210
+#define MSK_TMDS0_CCTRL1_TEST_SEL		0xc0
+#define MSK_TMDS0_CCTRL1_CLK1X_CTL		0x30
+
+/* TMDS Clock Enable, default value: 0x00 */
+#define REG_TMDS_CLK_EN				0x0211
+#define BIT_TMDS_CLK_EN_CLK_EN			BIT(0)
+
+/* TMDS Channel Enable, default value: 0x00 */
+#define REG_TMDS_CH_EN				0x0212
+#define BIT_TMDS_CH_EN_CH0_EN			BIT(4)
+#define BIT_TMDS_CH_EN_CH12_EN			BIT(0)
+
+/* BGR_BIAS, default value: 0x07 */
+#define REG_BGR_BIAS				0x0215
+#define BIT_BGR_BIAS_BGR_EN			BIT(7)
+#define MSK_BGR_BIAS_BIAS_BGR_D			0x0f
+
+/* TMDS 0 Digital I2C BW, default value: 0x0a */
+#define REG_ALICE0_BW_I2C			0x0231
+
+/* TMDS 0 Digital Zone Control, default value: 0xe0 */
+#define REG_ALICE0_ZONE_CTRL			0x024c
+#define BIT_ALICE0_ZONE_CTRL_ICRST_N		BIT(7)
+#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20	BIT(6)
+#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C		0x30
+#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL		0x0f
+
+/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */
+#define REG_ALICE0_MODE_CTRL			0x024d
+#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C	0x0c
+#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL		0x03
+
+/* MHL Tx Control 6th, default value: 0xa0 */
+#define REG_MHLTX_CTL6				0x0285
+#define MSK_MHLTX_CTL6_EMI_SEL			0xe0
+#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8		0x03
+
+/* Packet Filter0, default value: 0x00 */
+#define REG_PKT_FILTER_0			0x0290
+#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT	BIT(7)
+#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT	BIT(6)
+#define BIT_PKT_FILTER_0_DROP_MPEG_PKT		BIT(5)
+#define BIT_PKT_FILTER_0_DROP_SPIF_PKT		BIT(4)
+#define BIT_PKT_FILTER_0_DROP_AIF_PKT		BIT(3)
+#define BIT_PKT_FILTER_0_DROP_AVI_PKT		BIT(2)
+#define BIT_PKT_FILTER_0_DROP_CTS_PKT		BIT(1)
+#define BIT_PKT_FILTER_0_DROP_GCP_PKT		BIT(0)
+
+/* Packet Filter1, default value: 0x00 */
+#define REG_PKT_FILTER_1			0x0291
+#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS	BIT(7)
+#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS	BIT(6)
+#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT		BIT(3)
+#define BIT_PKT_FILTER_1_DROP_GEN2_PKT		BIT(2)
+#define BIT_PKT_FILTER_1_DROP_GEN_PKT		BIT(1)
+#define BIT_PKT_FILTER_1_DROP_VSIF_PKT		BIT(0)
+
+/* TMDS Clock Status, default value: 0x10 */
+#define REG_TMDS_CSTAT_P3			0x02a0
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE	BIT(7)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE	BIT(6)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP	BIT(5)
+#define BIT_TMDS_CSTAT_P3_CLR_AVI		BIT(3)
+#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS	BIT(2)
+#define BIT_TMDS_CSTAT_P3_SCDT			BIT(1)
+#define BIT_TMDS_CSTAT_P3_CKDT			BIT(0)
+
+/* RX_HDMI Control, default value: 0x10 */
+#define REG_RX_HDMI_CTRL0			0x02a1
+#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC	BIT(5)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE	BIT(3)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE	BIT(2)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN	BIT(1)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE	BIT(0)
+
+/* RX_HDMI Control, default value: 0x38 */
+#define REG_RX_HDMI_CTRL2			0x02a3
+#define MSK_RX_HDMI_CTRL2_IDLE_CNT		0xf0
+#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n)		((n) << 4)
+#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE		BIT(3)
+#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI	BIT(0)
+
+/* RX_HDMI Control, default value: 0x0f */
+#define REG_RX_HDMI_CTRL3			0x02a4
+#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN	0x0f
+
+/* rx_hdmi Clear Buffer, default value: 0x00 */
+#define REG_RX_HDMI_CLR_BUFFER			0x02ac
+#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP	0xc0
+#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI	BIT(5)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI	BIT(4)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3)
+#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID	BIT(2)
+#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN	BIT(1)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN	BIT(0)
+
+/* RX_HDMI VSI Header1, default value: 0x00 */
+#define REG_RX_HDMI_MON_PKT_HEADER1		0x02b8
+
+/* RX_HDMI VSI MHL Monitor, default value: 0x3c */
+#define REG_RX_HDMI_VSIF_MHL_MON		0x02d7
+
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03
+
+/* Interrupt Source 9, default value: 0x00 */
+#define REG_INTR9				0x02e0
+#define BIT_INTR9_EDID_ERROR			BIT(6)
+#define BIT_INTR9_EDID_DONE			BIT(5)
+#define BIT_INTR9_DEVCAP_DONE			BIT(4)
+
+/* Interrupt 9 Mask, default value: 0x00 */
+#define REG_INTR9_MASK				0x02e1
+
+/* TPI CBUS Start, default value: 0x00 */
+#define REG_TPI_CBUS_START			0x02e2
+#define BIT_TPI_CBUS_START_RCP_REQ_START	BIT(7)
+#define BIT_TPI_CBUS_START_RCPK_REPLY_START	BIT(6)
+#define BIT_TPI_CBUS_START_RCPE_REPLY_START	BIT(5)
+#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START	BIT(4)
+#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START	BIT(3)
+#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START	BIT(2)
+#define BIT_TPI_CBUS_START_GET_EDID_START_0	BIT(1)
+#define BIT_TPI_CBUS_START_GET_DEVCAP_START	BIT(0)
+
+/* EDID Control, default value: 0x10 */
+#define REG_EDID_CTRL				0x02e3
+#define BIT_EDID_CTRL_EDID_PRIME_VALID		BIT(7)
+#define BIT_EDID_CTRL_XDEVCAP_EN		BIT(6)
+#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP	BIT(5)
+#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO	BIT(4)
+#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3)
+#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL	BIT(2)
+#define BIT_EDID_CTRL_INVALID_BKSV		BIT(1)
+#define BIT_EDID_CTRL_EDID_MODE_EN		BIT(0)
+
+/* EDID FIFO Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR			0x02e9
+
+/* EDID FIFO Write Data, default value: 0x00 */
+#define REG_EDID_FIFO_WR_DATA			0x02ea
+
+/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR_MON			0x02eb
+
+/* EDID FIFO Read Data, default value: 0x00 */
+#define REG_EDID_FIFO_RD_DATA			0x02ec
+
+/* EDID DDC Segment Pointer, default value: 0x00 */
+#define REG_EDID_START_EXT			0x02ed
+
+/* TX IP BIST CNTL and Status, default value: 0x00 */
+#define REG_TX_IP_BIST_CNTLSTA			0x02f2
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE	BIT(5)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON	BIT(4)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN	BIT(3)
+#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL	BIT(2)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN	BIT(1)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL	BIT(0)
+
+/* TX IP BIST INST LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_INST_LOW			0x02f3
+#define REG_TX_IP_BIST_INST_HIGH		0x02f4
+
+/* TX IP BIST PATTERN LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_PAT_LOW			0x02f5
+#define REG_TX_IP_BIST_PAT_HIGH			0x02f6
+
+/* TX IP BIST CONFIGURE LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_CONF_LOW			0x02f7
+#define REG_TX_IP_BIST_CONF_HIGH		0x02f8
+
+/* E-MSC General Control, default value: 0x80 */
+#define REG_GENCTL				0x0300
+#define BIT_GENCTL_SPEC_TRANS_DIS		BIT(7)
+#define BIT_GENCTL_DIS_XMIT_ERR_STATE		BIT(6)
+#define BIT_GENCTL_SPI_MISO_EDGE		BIT(5)
+#define BIT_GENCTL_SPI_MOSI_EDGE		BIT(4)
+#define BIT_GENCTL_CLR_EMSC_RFIFO		BIT(3)
+#define BIT_GENCTL_CLR_EMSC_XFIFO		BIT(2)
+#define BIT_GENCTL_START_TRAIN_SEQ		BIT(1)
+#define BIT_GENCTL_EMSC_EN			BIT(0)
+
+/* E-MSC Comma ErrorCNT, default value: 0x03 */
+#define REG_COMMECNT				0x0305
+#define BIT_COMMECNT_I2C_TO_EMSC_EN		BIT(7)
+#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT		0x0f
+
+/* E-MSC RFIFO ByteCnt, default value: 0x00 */
+#define REG_EMSCRFIFOBCNTL			0x031a
+#define REG_EMSCRFIFOBCNTH			0x031b
+
+/* SPI Burst Cnt Status, default value: 0x00 */
+#define REG_SPIBURSTCNT				0x031e
+
+/* SPI Burst Status and SWRST, default value: 0x00 */
+#define REG_SPIBURSTSTAT			0x0322
+#define BIT_SPIBURSTSTAT_SPI_HDCPRST		BIT(7)
+#define BIT_SPIBURSTSTAT_SPI_CBUSRST		BIT(6)
+#define BIT_SPIBURSTSTAT_SPI_SRST		BIT(5)
+#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE	BIT(0)
+
+/* E-MSC 1st Interrupt, default value: 0x00 */
+#define REG_EMSCINTR				0x0323
+#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY		BIT(7)
+#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT		BIT(6)
+#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR	BIT(5)
+#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR	BIT(4)
+#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR	BIT(3)
+#define BIT_EMSCINTR_EMSC_XMIT_DONE		BIT(2)
+#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT		BIT(1)
+#define BIT_EMSCINTR_SPI_DVLD			BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK			0x0324
+
+/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_XMIT_WRITE_PORT		0x032a
+
+/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_RCV_READ_PORT			0x032b
+
+/* E-MSC 2nd Interrupt, default value: 0x00 */
+#define REG_EMSCINTR1				0x032c
+#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR	BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK1			0x032d
+#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0	BIT(0)
+
+/* MHL Top Ctl, default value: 0x00 */
+#define REG_MHL_TOP_CTL				0x0330
+#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL		BIT(7)
+#define BIT_MHL_TOP_CTL_MHL_PP_SEL		BIT(6)
+#define MSK_MHL_TOP_CTL_IF_TIMING_CTL		0x03
+
+/* MHL DataPath 1st Ctl, default value: 0xbc */
+#define REG_MHL_DP_CTL0				0x0331
+#define BIT_MHL_DP_CTL0_DP_OE			BIT(7)
+#define BIT_MHL_DP_CTL0_TX_OE_OVR		BIT(6)
+#define MSK_MHL_DP_CTL0_TX_OE			0x3f
+
+/* MHL DataPath 2nd Ctl, default value: 0xbb */
+#define REG_MHL_DP_CTL1				0x0332
+#define MSK_MHL_DP_CTL1_CK_SWING_CTL		0xf0
+#define MSK_MHL_DP_CTL1_DT_SWING_CTL		0x0f
+
+/* MHL DataPath 3rd Ctl, default value: 0x2f */
+#define REG_MHL_DP_CTL2				0x0333
+#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN		BIT(7)
+#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL		0x30
+#define MSK_MHL_DP_CTL2_CK_TERM_SEL		0x0c
+#define MSK_MHL_DP_CTL2_DT_TERM_SEL		0x03
+
+/* MHL DataPath 4th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL3				0x0334
+#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL		0xf0
+#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL		0x0f
+
+/* MHL DataPath 5th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL4				0x0335
+#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL		0xf0
+#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL		0x0f
+
+/* MHL DataPath 6th Ctl, default value: 0x3f */
+#define REG_MHL_DP_CTL5				0x0336
+#define BIT_MHL_DP_CTL5_RSEN_EN_OVR		BIT(7)
+#define BIT_MHL_DP_CTL5_RSEN_EN			BIT(6)
+#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL	0x30
+#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL		0x0c
+#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL		0x03
+
+/* MHL PLL 1st Ctl, default value: 0x05 */
+#define REG_MHL_PLL_CTL0			0x0337
+#define BIT_MHL_PLL_CTL0_AUD_CLK_EN		BIT(7)
+
+#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO		0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10	0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6	0x60
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4	0x50
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2	0x40
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5	0x30
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3	0x20
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1	0x00
+
+#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO		0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X	0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X	0x08
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X	0x04
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X	0x00
+
+#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL	BIT(1)
+#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE		BIT(0)
+
+/* MHL PLL 3rd Ctl, default value: 0x80 */
+#define REG_MHL_PLL_CTL2			0x0339
+#define BIT_MHL_PLL_CTL2_CLKDETECT_EN		BIT(7)
+#define BIT_MHL_PLL_CTL2_MEAS_FVCO		BIT(3)
+#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK		BIT(2)
+#define MSK_MHL_PLL_CTL2_PLL_LF_SEL		0x03
+
+/* MHL CBUS 1st Ctl, default value: 0x12 */
+#define REG_MHL_CBUS_CTL0			0x0340
+#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE	BIT(7)
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL	0x30
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734	0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747	0x10
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740	0x20
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754	0x30
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL	0x0c
+
+#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL		0x03
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST	0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK	0x01
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG	0x02
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03
+
+/* MHL CBUS 2nd Ctl, default value: 0x03 */
+#define REG_MHL_CBUS_CTL1			0x0341
+#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL	0x07
+#define VAL_MHL_CBUS_CTL1_0888_OHM		0x00
+#define VAL_MHL_CBUS_CTL1_1115_OHM		0x04
+#define VAL_MHL_CBUS_CTL1_1378_OHM		0x07
+
+/* MHL CoC 1st Ctl, default value: 0xc3 */
+#define REG_MHL_COC_CTL0			0x0342
+#define BIT_MHL_COC_CTL0_COC_BIAS_EN		BIT(7)
+#define MSK_MHL_COC_CTL0_COC_BIAS_CTL		0x70
+#define MSK_MHL_COC_CTL0_COC_TERM_CTL		0x07
+
+/* MHL CoC 2nd Ctl, default value: 0x87 */
+#define REG_MHL_COC_CTL1			0x0343
+#define BIT_MHL_COC_CTL1_COC_EN			BIT(7)
+#define MSK_MHL_COC_CTL1_COC_DRV_CTL		0x3f
+
+/* MHL CoC 4th Ctl, default value: 0x00 */
+#define REG_MHL_COC_CTL3			0x0345
+#define BIT_MHL_COC_CTL3_COC_AECHO_EN		BIT(0)
+
+/* MHL CoC 5th Ctl, default value: 0x28 */
+#define REG_MHL_COC_CTL4			0x0346
+#define MSK_MHL_COC_CTL4_COC_IF_CTL		0xf0
+#define MSK_MHL_COC_CTL4_COC_SLEW_CTL		0x0f
+
+/* MHL CoC 6th Ctl, default value: 0x0d */
+#define REG_MHL_COC_CTL5			0x0347
+
+/* MHL DoC 1st Ctl, default value: 0x18 */
+#define REG_MHL_DOC_CTL0			0x0349
+#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN		BIT(7)
+#define MSK_MHL_DOC_CTL0_DOC_DM_TERM		0x38
+#define MSK_MHL_DOC_CTL0_DOC_OPMODE		0x06
+#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN		BIT(0)
+
+/* MHL DataPath 7th Ctl, default value: 0x2a */
+#define REG_MHL_DP_CTL6				0x0350
+#define BIT_MHL_DP_CTL6_DP_TAP2_SGN		BIT(5)
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN		BIT(4)
+#define BIT_MHL_DP_CTL6_DP_TAP1_SGN		BIT(3)
+#define BIT_MHL_DP_CTL6_DP_TAP1_EN		BIT(2)
+#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN	BIT(1)
+#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL		BIT(0)
+
+/* MHL DataPath 8th Ctl, default value: 0x06 */
+#define REG_MHL_DP_CTL7				0x0351
+#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL	0xf0
+#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL		0x0f
+
+/* Tx Zone Ctl1, default value: 0x00 */
+#define REG_TX_ZONE_CTL1			0x0361
+#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE	0x08
+
+/* MHL3 Tx Zone Ctl, default value: 0x00 */
+#define REG_MHL3_TX_ZONE_CTL			0x0364
+#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7)
+#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE	0x03
+
+#define MSK_TX_ZONE_CTL3_TX_ZONE		0x03
+#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS		0x00
+#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS		0x01
+#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS	0x02
+
+/* HDCP Polling Control and Status, default value: 0x70 */
+#define REG_HDCP2X_POLL_CS			0x0391
+
+#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4)
+#define MSK_HDCP2X_POLL_CS_			0x0c
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT	BIT(1)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN	BIT(0)
+
+/* HDCP Interrupt 0, default value: 0x00 */
+#define REG_HDCP2X_INTR0			0x0398
+
+/* HDCP Interrupt 0 Mask, default value: 0x00 */
+#define REG_HDCP2X_INTR0_MASK			0x0399
+
+/* HDCP General Control 0, default value: 0x02 */
+#define REG_HDCP2X_CTRL_0			0x03a0
+#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN	BIT(7)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL	BIT(6)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR	BIT(5)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE	BIT(4)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE	BIT(3)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER	BIT(2)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX		BIT(1)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_EN		BIT(0)
+
+/* HDCP General Control 1, default value: 0x08 */
+#define REG_HDCP2X_CTRL_1			0x03a1
+#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0	0xf0
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW		BIT(3)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR	BIT(2)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK	BIT(1)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW	BIT(0)
+
+/* HDCP Misc Control, default value: 0x00 */
+#define REG_HDCP2X_MISC_CTRL			0x03a5
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR	BIT(2)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD	BIT(0)
+
+/* HDCP RPT SMNG K, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_K			0x03a6
+
+/* HDCP RPT SMNG In, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_IN			0x03a7
+
+/* HDCP Auth Status, default value: 0x00 */
+#define REG_HDCP2X_AUTH_STAT			0x03aa
+
+/* HDCP RPT RCVID Out, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVID_OUT		0x03ac
+
+/* HDCP TP1, default value: 0x62 */
+#define REG_HDCP2X_TP1				0x03b4
+
+/* HDCP GP Out 0, default value: 0x00 */
+#define REG_HDCP2X_GP_OUT0			0x03c7
+
+/* HDCP Repeater RCVR ID 0, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVR_ID0			0x03d1
+
+/* HDCP DDCM Status, default value: 0x00 */
+#define REG_HDCP2X_DDCM_STS			0x03d8
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f
+
+/* HDMI2MHL3 Control, default value: 0x0a */
+#define REG_M3_CTRL				0x03e0
+#define BIT_M3_CTRL_H2M_SWRST			BIT(4)
+#define BIT_M3_CTRL_SW_MHL3_SEL			BIT(3)
+#define BIT_M3_CTRL_M3AV_EN			BIT(2)
+#define BIT_M3_CTRL_ENC_TMDS			BIT(1)
+#define BIT_M3_CTRL_MHL3_MASTER_EN		BIT(0)
+
+#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+				  | BIT_M3_CTRL_ENC_TMDS)
+#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+				| BIT_M3_CTRL_M3AV_EN \
+				| BIT_M3_CTRL_ENC_TMDS \
+				| BIT_M3_CTRL_MHL3_MASTER_EN)
+
+/* HDMI2MHL3 Port0 Control, default value: 0x04 */
+#define REG_M3_P0CTRL				0x03e1
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN	BIT(4)
+#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN	BIT(3)
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN		BIT(2)
+#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED	BIT(1)
+#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN		BIT(0)
+
+#define REG_M3_POSTM				0x03e2
+#define MSK_M3_POSTM_RRP_DECODE			0xf8
+#define MSK_M3_POSTM_MHL3_P0_STM_ID		0x07
+
+/* HDMI2MHL3 Scramble Control, default value: 0x41 */
+#define REG_M3_SCTRL				0x03e6
+#define MSK_M3_SCTRL_MHL3_SR_LENGTH		0xf0
+#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN		BIT(0)
+
+/* HSIC Div Ctl, default value: 0x05 */
+#define REG_DIV_CTL_MAIN			0x03f2
+#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN	0x1c
+#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN	0x03
+
+/* MHL Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_DEVCAP_0			0x0400
+
+/* MHL Interrupt 1st Byte, default value: 0x00 */
+#define REG_MHL_INT_0				0x0420
+
+/* Device Status 1st byte, default value: 0x00 */
+#define REG_MHL_STAT_0				0x0430
+
+/* CBUS Scratch Pad 1st Byte, default value: 0x00 */
+#define REG_MHL_SCRPAD_0			0x0440
+
+/* MHL Extended Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_EXTDEVCAP_0			0x0480
+
+/* Device Extended Status 1st byte, default value: 0x00 */
+#define REG_MHL_EXTSTAT_0			0x0490
+
+/* TPI DTD Byte2, default value: 0x00 */
+#define REG_TPI_DTD_B2				0x0602
+
+#define VAL_TPI_QUAN_RANGE_LIMITED		0x01
+#define VAL_TPI_QUAN_RANGE_FULL			0x02
+#define VAL_TPI_FORMAT_RGB			0x00
+#define VAL_TPI_FORMAT_YCBCR444			0x01
+#define VAL_TPI_FORMAT_YCBCR422			0x02
+#define VAL_TPI_FORMAT_INTERNAL_RGB		0x03
+#define VAL_TPI_FORMAT(_fmt, _qr) \
+		(VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2))
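+/* e.g. VAL_TPI_FORMAT(YCBCR422, FULL) selects YCbCr 4:2:2, full range */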
+
+/* Input Format, default value: 0x00 */
+#define REG_TPI_INPUT				0x0609
+#define BIT_TPI_INPUT_EXTENDEDBITMODE		BIT(7)
+#define BIT_TPI_INPUT_ENDITHER			BIT(6)
+#define MSK_TPI_INPUT_INPUT_QUAN_RANGE		0x0c
+#define MSK_TPI_INPUT_INPUT_FORMAT		0x03
+
+/* Output Format, default value: 0x00 */
+#define REG_TPI_OUTPUT				0x060a
+#define BIT_TPI_OUTPUT_CSCMODE709		BIT(4)
+#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE	0x0c
+#define MSK_TPI_OUTPUT_OUTPUT_FORMAT		0x03
+
+/* TPI AVI Check Sum, default value: 0x00 */
+#define REG_TPI_AVI_CHSUM			0x060c
+
+/* TPI System Control, default value: 0x00 */
+#define REG_TPI_SC				0x061a
+#define BIT_TPI_SC_TPI_UPDATE_FLG		BIT(7)
+#define BIT_TPI_SC_TPI_REAUTH_CTL		BIT(6)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_1		BIT(5)
+#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN	BIT(4)
+#define BIT_TPI_SC_TPI_AV_MUTE			BIT(3)
+#define BIT_TPI_SC_DDC_GPU_REQUEST		BIT(2)
+#define BIT_TPI_SC_DDC_TPI_SW			BIT(1)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI	BIT(0)
+
+/* TPI COPP Query Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA1			0x0629
+#define BIT_TPI_COPP_DATA1_COPP_GPROT		BIT(7)
+#define BIT_TPI_COPP_DATA1_COPP_LPROT		BIT(6)
+#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS	0x30
+#define VAL_TPI_COPP_LINK_STATUS_NORMAL		0x00
+#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST	0x10
+#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20
+#define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED	0x30
+#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP	BIT(3)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0	BIT(2)
+#define BIT_TPI_COPP_DATA1_COPP_PROTYPE		BIT(1)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1	BIT(0)
+
+/* TPI COPP Control Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA2			0x062a
+#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION	BIT(5)
+#define BIT_TPI_COPP_DATA2_KSV_FORWARD		BIT(4)
+#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN	BIT(3)
+#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK	BIT(2)
+#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD	BIT(1)
+#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL	BIT(0)
+
+/* TPI Interrupt Enable, default value: 0x00 */
+#define REG_TPI_INTR_EN				0x063c
+
+/* TPI Interrupt Status Low Byte, default value: 0x00 */
+#define REG_TPI_INTR_ST0			0x063d
+#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT	BIT(7)
+#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT		BIT(6)
+#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT	BIT(5)
+#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT	BIT(3)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1)
+#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT	BIT(0)
+
+/* TPI DS BCAPS Status, default value: 0x00 */
+#define REG_TPI_DS_BCAPS			0x0644
+
+/* TPI BStatus1, default value: 0x00 */
+#define REG_TPI_BSTATUS1			0x0645
+#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED		BIT(7)
+#define MSK_TPI_BSTATUS1_DS_DEV_CNT		0x7f
+
+/* TPI BStatus2, default value: 0x10 */
+#define REG_TPI_BSTATUS2			0x0646
+#define MSK_TPI_BSTATUS2_DS_BSTATUS		0xe0
+#define BIT_TPI_BSTATUS2_DS_HDMI_MODE		BIT(4)
+#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED		BIT(3)
+#define MSK_TPI_BSTATUS2_DS_DEPTH		0x07
+
+/* TPI HW Optimization Control #3, default value: 0x00 */
+#define REG_TPI_HW_OPT3				0x06bb
+#define BIT_TPI_HW_OPT3_DDC_DEBUG		BIT(7)
+#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP		BIT(3)
+#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE	BIT(2)
+#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL	0x03
+
+/* TPI Info Frame Select, default value: 0x00 */
+#define REG_TPI_INFO_FSEL			0x06bf
+#define BIT_TPI_INFO_FSEL_TPI_INFO_EN		BIT(7)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT		BIT(6)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG	BIT(5)
+#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL		0x07
+
+/* TPI Info Byte #0, default value: 0x00 */
+#define REG_TPI_INFO_B0				0x06c0
+
+/* CoC Status, default value: 0x00 */
+#define REG_COC_STAT_0				0x0700
+#define REG_COC_STAT_1				0x0701
+#define REG_COC_STAT_2				0x0702
+#define REG_COC_STAT_3				0x0703
+#define REG_COC_STAT_4				0x0704
+#define REG_COC_STAT_5				0x0705
+
+/* CoC 1st Ctl, default value: 0x40 */
+#define REG_COC_CTL0				0x0710
+
+/* CoC 2nd Ctl, default value: 0x0a */
+#define REG_COC_CTL1				0x0711
+#define MSK_COC_CTL1_COC_CTRL1_7_6		0xc0
+#define MSK_COC_CTL1_COC_CTRL1_5_0		0x3f
+
+/* CoC 3rd Ctl, default value: 0x14 */
+#define REG_COC_CTL2				0x0712
+#define MSK_COC_CTL2_COC_CTRL2_7_6		0xc0
+#define MSK_COC_CTL2_COC_CTRL2_5_0		0x3f
+
+/* CoC 4th Ctl, default value: 0x40 */
+#define REG_COC_CTL3				0x0713
+#define BIT_COC_CTL3_COC_CTRL3_7		BIT(7)
+#define MSK_COC_CTL3_COC_CTRL3_6_0		0x7f
+
+/* CoC 7th Ctl, default value: 0x00 */
+#define REG_COC_CTL6				0x0716
+#define BIT_COC_CTL6_COC_CTRL6_7		BIT(7)
+#define BIT_COC_CTL6_COC_CTRL6_6		BIT(6)
+#define MSK_COC_CTL6_COC_CTRL6_5_0		0x3f
+
+/* CoC 8th Ctl, default value: 0x06 */
+#define REG_COC_CTL7				0x0717
+#define BIT_COC_CTL7_COC_CTRL7_7		BIT(7)
+#define BIT_COC_CTL7_COC_CTRL7_6		BIT(6)
+#define BIT_COC_CTL7_COC_CTRL7_5		BIT(5)
+#define MSK_COC_CTL7_COC_CTRL7_4_3		0x18
+#define MSK_COC_CTL7_COC_CTRL7_2_0		0x07
+
+/* CoC 10th Ctl, default value: 0x00 */
+#define REG_COC_CTL9				0x0719
+
+/* CoC 11th Ctl, default value: 0x00 */
+#define REG_COC_CTLA				0x071a
+
+/* CoC 12th Ctl, default value: 0x00 */
+#define REG_COC_CTLB				0x071b
+
+/* CoC 13th Ctl, default value: 0x0f */
+#define REG_COC_CTLC				0x071c
+
+/* CoC 14th Ctl, default value: 0x0a */
+#define REG_COC_CTLD				0x071d
+#define BIT_COC_CTLD_COC_CTRLD_7		BIT(7)
+#define MSK_COC_CTLD_COC_CTRLD_6_0		0x7f
+
+/* CoC 15th Ctl, default value: 0x0a */
+#define REG_COC_CTLE				0x071e
+#define BIT_COC_CTLE_COC_CTRLE_7		BIT(7)
+#define MSK_COC_CTLE_COC_CTRLE_6_0		0x7f
+
+/* CoC 16th Ctl, default value: 0x00 */
+#define REG_COC_CTLF				0x071f
+#define MSK_COC_CTLF_COC_CTRLF_7_3		0xf8
+#define MSK_COC_CTLF_COC_CTRLF_2_0		0x07
+
+/* CoC 18th Ctl, default value: 0x32 */
+#define REG_COC_CTL11				0x0721
+#define MSK_COC_CTL11_COC_CTRL11_7_4		0xf0
+#define MSK_COC_CTL11_COC_CTRL11_3_0		0x0f
+
+/* CoC 21st Ctl, default value: 0x00 */
+#define REG_COC_CTL14				0x0724
+#define MSK_COC_CTL14_COC_CTRL14_7_4		0xf0
+#define MSK_COC_CTL14_COC_CTRL14_3_0		0x0f
+
+/* CoC 22nd Ctl, default value: 0x00 */
+#define REG_COC_CTL15				0x0725
+#define BIT_COC_CTL15_COC_CTRL15_7		BIT(7)
+#define MSK_COC_CTL15_COC_CTRL15_6_4		0x70
+#define MSK_COC_CTL15_COC_CTRL15_3_0		0x0f
+
+/* CoC Interrupt, default value: 0x00 */
+#define REG_COC_INTR				0x0726
+
+/* CoC Interrupt Mask, default value: 0x00 */
+#define REG_COC_INTR_MASK			0x0727
+#define BIT_COC_PLL_LOCK_STATUS_CHANGE		BIT(0)
+#define BIT_COC_CALIBRATION_DONE		BIT(1)
+
+/* CoC Misc Ctl, default value: 0x00 */
+#define REG_COC_MISC_CTL0			0x0728
+#define BIT_COC_MISC_CTL0_FSM_MON		BIT(7)
+
+/* CoC 24th Ctl, default value: 0x00 */
+#define REG_COC_CTL17				0x072a
+#define MSK_COC_CTL17_COC_CTRL17_7_4		0xf0
+#define MSK_COC_CTL17_COC_CTRL17_3_0		0x0f
+
+/* CoC 25th Ctl, default value: 0x00 */
+#define REG_COC_CTL18				0x072b
+#define MSK_COC_CTL18_COC_CTRL18_7_4		0xf0
+#define MSK_COC_CTL18_COC_CTRL18_3_0		0x0f
+
+/* CoC 26th Ctl, default value: 0x00 */
+#define REG_COC_CTL19				0x072c
+#define MSK_COC_CTL19_COC_CTRL19_7_4		0xf0
+#define MSK_COC_CTL19_COC_CTRL19_3_0		0x0f
+
+/* CoC 27th Ctl, default value: 0x00 */
+#define REG_COC_CTL1A				0x072d
+#define MSK_COC_CTL1A_COC_CTRL1A_7_2		0xfc
+#define MSK_COC_CTL1A_COC_CTRL1A_1_0		0x03
+
+/* DoC 9th Status, default value: 0x00 */
+#define REG_DOC_STAT_8				0x0740
+
+/* DoC 10th Status, default value: 0x00 */
+#define REG_DOC_STAT_9				0x0741
+
+/* DoC 5th CFG, default value: 0x00 */
+#define REG_DOC_CFG4				0x074e
+#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM		0x0f
+
+/* DoC 1st Ctl, default value: 0x40 */
+#define REG_DOC_CTL0				0x0751
+
+/* DoC 7th Ctl, default value: 0x00 */
+#define REG_DOC_CTL6				0x0757
+#define BIT_DOC_CTL6_DOC_CTRL6_7		BIT(7)
+#define BIT_DOC_CTL6_DOC_CTRL6_6		BIT(6)
+#define MSK_DOC_CTL6_DOC_CTRL6_5_4		0x30
+#define MSK_DOC_CTL6_DOC_CTRL6_3_0		0x0f
+
+/* DoC 8th Ctl, default value: 0x00 */
+#define REG_DOC_CTL7				0x0758
+#define BIT_DOC_CTL7_DOC_CTRL7_7		BIT(7)
+#define BIT_DOC_CTL7_DOC_CTRL7_6		BIT(6)
+#define BIT_DOC_CTL7_DOC_CTRL7_5		BIT(5)
+#define MSK_DOC_CTL7_DOC_CTRL7_4_3		0x18
+#define MSK_DOC_CTL7_DOC_CTRL7_2_0		0x07
+
+/* DoC 9th Ctl, default value: 0x00 */
+#define REG_DOC_CTL8				0x076c
+#define BIT_DOC_CTL8_DOC_CTRL8_7		BIT(7)
+#define MSK_DOC_CTL8_DOC_CTRL8_6_4		0x70
+#define MSK_DOC_CTL8_DOC_CTRL8_3_2		0x0c
+#define MSK_DOC_CTL8_DOC_CTRL8_1_0		0x03
+
+/* DoC 10th Ctl, default value: 0x00 */
+#define REG_DOC_CTL9				0x076d
+
+/* DoC 11th Ctl, default value: 0x00 */
+#define REG_DOC_CTLA				0x076e
+
+/* DoC 15th Ctl, default value: 0x00 */
+#define REG_DOC_CTLE				0x0772
+#define BIT_DOC_CTLE_DOC_CTRLE_7		BIT(7)
+#define BIT_DOC_CTLE_DOC_CTRLE_6		BIT(6)
+#define MSK_DOC_CTLE_DOC_CTRLE_5_4		0x30
+#define MSK_DOC_CTLE_DOC_CTRLE_3_0		0x0f
+
+/* Interrupt Mask 1st, default value: 0x00 */
+#define REG_MHL_INT_0_MASK			0x0580
+
+/* Interrupt Mask 2nd, default value: 0x00 */
+#define REG_MHL_INT_1_MASK			0x0581
+
+/* Interrupt Mask 3rd, default value: 0x00 */
+#define REG_MHL_INT_2_MASK			0x0582
+
+/* Interrupt Mask 4th, default value: 0x00 */
+#define REG_MHL_INT_3_MASK			0x0583
+
+/* MDT Receive Time Out, default value: 0x00 */
+#define REG_MDT_RCV_TIMEOUT			0x0584
+
+/* MDT Transmit Time Out, default value: 0x00 */
+#define REG_MDT_XMIT_TIMEOUT			0x0585
+
+/* MDT Receive Control, default value: 0x00 */
+#define REG_MDT_RCV_CTRL			0x0586
+#define BIT_MDT_RCV_CTRL_MDT_RCV_EN		BIT(7)
+#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN	BIT(6)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN	BIT(4)
+#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN	BIT(3)
+#define BIT_MDT_RCV_CTRL_MDT_DISABLE		BIT(2)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL	BIT(1)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR	BIT(0)
+
+/* MDT Receive Read Port, default value: 0x00 */
+#define REG_MDT_RCV_READ_PORT			0x0587
+
+/* MDT Transmit Control, default value: 0x70 */
+#define REG_MDT_XMIT_CTRL			0x0588
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN		BIT(7)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN	BIT(6)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID	BIT(4)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
+#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT	BIT(2)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL	BIT(1)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR	BIT(0)
+
+/* MDT Transmit WRITE Port, default value: 0x00 */
+#define REG_MDT_XMIT_WRITE_PORT			0x0589
+
+/* MDT RFIFO Status, default value: 0x00 */
+#define REG_MDT_RFIFO_STAT			0x058a
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT	0xe0
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f
+
+/* MDT XFIFO Status, default value: 0x80 */
+#define REG_MDT_XFIFO_STAT			0x058b
+#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0
+#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN	BIT(4)
+#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN	0x0f
+
+/* MDT Interrupt 0, default value: 0x0c */
+#define REG_MDT_INT_0				0x058c
+#define BIT_MDT_RFIFO_DATA_RDY			BIT(0)
+#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE		BIT(2)
+#define BIT_MDT_XFIFO_EMPTY			BIT(3)
+
+/* MDT Interrupt 0 Mask, default value: 0x00 */
+#define REG_MDT_INT_0_MASK			0x058d
+
+/* MDT Interrupt 1, default value: 0x00 */
+#define REG_MDT_INT_1				0x058e
+#define BIT_MDT_RCV_TIMEOUT			BIT(0)
+#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD		BIT(1)
+#define BIT_MDT_RCV_SM_ERROR			BIT(2)
+#define BIT_MDT_XMIT_TIMEOUT			BIT(5)
+#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD		BIT(6)
+#define BIT_MDT_XMIT_SM_ERROR			BIT(7)
+
+/* MDT Interrupt 1 Mask, default value: 0x00 */
+#define REG_MDT_INT_1_MASK			0x058f
+
+/* CBUS Vendor ID, default value: 0x01 */
+#define REG_CBUS_VENDOR_ID			0x0590
+
+/* CBUS Connection Status, default value: 0x00 */
+#define REG_CBUS_STATUS				0x0591
+#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT	BIT(4)
+#define BIT_CBUS_STATUS_MSC_HB_SUCCESS		BIT(3)
+#define BIT_CBUS_STATUS_CBUS_HPD		BIT(2)
+#define BIT_CBUS_STATUS_MHL_MODE		BIT(1)
+#define BIT_CBUS_STATUS_CBUS_CONNECTED		BIT(0)
+
+/* CBUS Interrupt 1st, default value: 0x00 */
+#define REG_CBUS_INT_0				0x0592
+#define BIT_CBUS_MSC_MT_DONE_NACK		BIT(7)
+#define BIT_CBUS_MSC_MR_SET_INT			BIT(6)
+#define BIT_CBUS_MSC_MR_WRITE_BURST		BIT(5)
+#define BIT_CBUS_MSC_MR_MSC_MSG			BIT(4)
+#define BIT_CBUS_MSC_MR_WRITE_STAT		BIT(3)
+#define BIT_CBUS_HPD_CHG			BIT(2)
+#define BIT_CBUS_MSC_MT_DONE			BIT(1)
+#define BIT_CBUS_CNX_CHG			BIT(0)
+
+/* CBUS Interrupt Mask 1st, default value: 0x00 */
+#define REG_CBUS_INT_0_MASK			0x0593
+
+/* CBUS Interrupt 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1				0x0594
+#define BIT_CBUS_CMD_ABORT			BIT(6)
+#define BIT_CBUS_MSC_ABORT_RCVD			BIT(3)
+#define BIT_CBUS_DDC_ABORT			BIT(2)
+#define BIT_CBUS_CEC_ABORT			BIT(1)
+
+/* CBUS Interrupt Mask 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1_MASK			0x0595
+
+/* CBUS DDC Abort Interrupt, default value: 0x00 */
+#define REG_DDC_ABORT_INT			0x0598
+
+/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */
+#define REG_DDC_ABORT_INT_MASK			0x0599
+
+/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT			0x059a
+
+/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT_MASK		0x059b
+
+/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT			0x059c
+
+/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT_MASK		0x059d
+
+/* CBUS RX DISCOVERY interrupt, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0			0x059e
+
+/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0_MASK		0x059f
+
+/* CBUS_Link_Layer Control #8, default value: 0x00 */
+#define REG_CBUS_LINK_CTRL_8			0x05a7
+
+/* MDT State Machine Status, default value: 0x00 */
+#define REG_MDT_SM_STAT				0x05b5
+#define MSK_MDT_SM_STAT_MDT_RCV_STATE		0xf0
+#define MSK_MDT_SM_STAT_MDT_XMIT_STATE		0x0f
+
+/* CBUS MSC command trigger, default value: 0x00 */
+#define REG_MSC_COMMAND_START			0x05b8
+#define BIT_MSC_COMMAND_START_DEBUG		BIT(5)
+#define BIT_MSC_COMMAND_START_WRITE_BURST	BIT(4)
+#define BIT_MSC_COMMAND_START_WRITE_STAT	BIT(3)
+#define BIT_MSC_COMMAND_START_READ_DEVCAP	BIT(2)
+#define BIT_MSC_COMMAND_START_MSC_MSG		BIT(1)
+#define BIT_MSC_COMMAND_START_PEER		BIT(0)
+
+/* CBUS MSC Command/Offset, default value: 0x00 */
+#define REG_MSC_CMD_OR_OFFSET			0x05b9
+
+/* CBUS MSC Transmit Data */
+#define REG_MSC_1ST_TRANSMIT_DATA		0x05ba
+#define REG_MSC_2ND_TRANSMIT_DATA		0x05bb
+
+/* CBUS MSC Requester Received Data */
+#define REG_MSC_MT_RCVD_DATA0			0x05bc
+#define REG_MSC_MT_RCVD_DATA1			0x05bd
+
+/* CBUS MSC Responder MSC_MSG Received Data */
+#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA	0x05bf
+#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA	0x05c0
+
+/* CBUS MSC Heartbeat Control, default value: 0x27 */
+#define REG_MSC_HEARTBEAT_CTRL			0x05c4
+#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN	BIT(7)
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f
+
+/* CBUS MSC Compatibility Control, default value: 0x02 */
+#define REG_CBUS_MSC_COMPAT_CTRL		0x05c7
+#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN	BIT(7)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2)
+
+/* CBUS3 Converter Control, default value: 0x24 */
+#define REG_CBUS3_CNVT				0x05dc
+#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT		0xf0
+#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL	0x0c
+#define BIT_CBUS3_CNVT_TEARCBUS_EN		BIT(1)
+#define BIT_CBUS3_CNVT_CBUS3CNVT_EN		BIT(0)
+
+/* Discovery Control1, default value: 0x24 */
+#define REG_DISC_CTRL1				0x05e0
+#define BIT_DISC_CTRL1_CBUS_INTR_EN		BIT(7)
+#define BIT_DISC_CTRL1_HB_ONLY			BIT(6)
+#define MSK_DISC_CTRL1_DISC_ATT			0x30
+#define MSK_DISC_CTRL1_DISC_CYC			0x0c
+#define BIT_DISC_CTRL1_DISC_EN			BIT(0)
+
+#define VAL_PUP_OFF				0
+#define VAL_PUP_20K				1
+#define VAL_PUP_5K				2
+
+/* Discovery Control4, default value: 0x80 */
+#define REG_DISC_CTRL4				0x05e3
+#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL		0xc0
+#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL		0x30
+#define VAL_DISC_CTRL4(pup_disc, pup_idle) \
+	(((pup_disc) << 6) | ((pup_idle) << 4))
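+/* e.g. VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_OFF) */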
+
+/* Discovery Control5, default value: 0x03 */
+#define REG_DISC_CTRL5				0x05e4
+#define BIT_DISC_CTRL5_DSM_OVRIDE		BIT(3)
+#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL		0x03
+
+/* Discovery Control8, default value: 0x81 */
+#define REG_DISC_CTRL8				0x05e7
+#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS	BIT(7)
+#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN	BIT(0)
+
+/* Discovery Control9, default value: 0x54 */
+#define REG_DISC_CTRL9				0x05e8
+#define BIT_DISC_CTRL9_MHL3_RSEN_BYP		BIT(7)
+#define BIT_DISC_CTRL9_MHL3DISC_EN		BIT(6)
+#define BIT_DISC_CTRL9_WAKE_DRVFLT		BIT(4)
+#define BIT_DISC_CTRL9_NOMHL_EST		BIT(3)
+#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED	BIT(2)
+#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS	BIT(1)
+#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0)
+
+/* Discovery Status1, default value: 0x00 */
+#define REG_DISC_STAT1				0x05eb
+#define BIT_DISC_STAT1_PSM_OVRIDE		BIT(5)
+#define MSK_DISC_STAT1_DISC_SM			0x0f
+
+/* Discovery Status2, default value: 0x00 */
+#define REG_DISC_STAT2				0x05ec
+#define BIT_DISC_STAT2_CBUS_OE_POL		BIT(6)
+#define BIT_DISC_STAT2_CBUS_SATUS		BIT(5)
+#define BIT_DISC_STAT2_RSEN			BIT(4)
+
+#define MSK_DISC_STAT2_MHL_VRSN			0x0c
+#define VAL_DISC_STAT2_DEFAULT			0x00
+#define VAL_DISC_STAT2_MHL1_2			0x04
+#define VAL_DISC_STAT2_MHL3			0x08
+#define VAL_DISC_STAT2_RESERVED			0x0c
+
+#define MSK_DISC_STAT2_RGND			0x03
+#define VAL_RGND_OPEN				0x00
+#define VAL_RGND_2K				0x01
+#define VAL_RGND_1K				0x02
+#define VAL_RGND_SHORT				0x03
+
+/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0			0x05ed
+#define BIT_RGND_READY_INT			BIT(6)
+#define BIT_CBUS_MHL12_DISCON_INT		BIT(5)
+#define BIT_CBUS_MHL3_DISCON_INT		BIT(4)
+#define BIT_NOT_MHL_EST_INT			BIT(3)
+#define BIT_MHL_EST_INT				BIT(2)
+#define BIT_MHL3_EST_INT			BIT(1)
+#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \
+			    | BIT_CBUS_MHL3_DISCON_INT \
+			    | BIT_NOT_MHL_EST_INT)
+
+/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0_MASK		0x05ee
+
+#endif /* __SIL_SII8620_H__ */
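As a hedged illustration of how these discovery-control fields compose, here is what a pull-up configuration write could look like; the sii8620_write() accessor and the ctx handle are assumptions for the example, not part of this header:

	/* Illustrative: 20 kOhm pull-up while discovering, 5 kOhm while idle. */
	sii8620_write(ctx, REG_DISC_CTRL4,
		      VAL_DISC_CTRL4(VAL_PUP_20K, VAL_PUP_5K));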
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index daecf1a..3a6309d7 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -138,12 +138,12 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
 {
 	struct drm_device *dev = afbdev->helper.dev;
 	struct cirrus_device *cdev = dev->dev_private;
-	u32 bpp, depth;
+	u32 bpp;
 	u32 size;
 	struct drm_gem_object *gobj;
-
 	int ret = 0;
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
 
 	if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
 				      bpp, mode_cmd->pitches[0]))
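These cirrus conversions derive the legacy bpp from the bytes-per-pixel of plane 0 in the new format description (see the drm_fourcc.c changes below). A minimal sketch of the same lookup done explicitly via drm_format_info(); the helper name is illustrative:

	#include <drm/drm_fourcc.h>

	/* Illustrative: legacy bpp for single-plane formats, 0 otherwise. */
	static u32 legacy_bpp_from_format(u32 pixel_format)
	{
		const struct drm_format_info *info = drm_format_info(pixel_format);

		if (!info || info->num_planes != 1)
			return 0;

		return info->cpp[0] * 8;	/* e.g. DRM_FORMAT_XRGB8888 -> 32 */
	}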
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 76bcb43..2c3c0d4 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -52,10 +52,10 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
 	struct cirrus_device *cdev = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct cirrus_framebuffer *cirrus_fb;
+	u32 bpp;
 	int ret;
-	u32 bpp, depth;
 
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+	bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
 
 	if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
 				      bpp, mode_cmd->pitches[0]))
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bb2438d..de52b20 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
 	.ttm_tt_populate = cirrus_ttm_tt_populate,
 	.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
 	.init_mem_type = cirrus_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = cirrus_bo_evict_flags,
 	.move = NULL,
 	.verify_access = cirrus_bo_verify_access,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 2373960..c32fb3c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -74,6 +74,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_release);
 int
 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
 {
+	kref_init(&state->ref);
+
 	/* TODO legacy paths should maybe do a better job about
 	 * setting this appropriately?
 	 */
@@ -215,22 +217,16 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 EXPORT_SYMBOL(drm_atomic_state_clear);
 
 /**
- * drm_atomic_state_free - free all memory for an atomic state
- * @state: atomic state to deallocate
+ * __drm_atomic_state_free - free all memory for an atomic state
+ * @ref: kref embedded in the &drm_atomic_state to deallocate
  *
  * This frees all memory associated with an atomic state, including all the
  * per-object state for planes, crtcs and connectors.
  */
-void drm_atomic_state_free(struct drm_atomic_state *state)
+void __drm_atomic_state_free(struct kref *ref)
 {
-	struct drm_device *dev;
-	struct drm_mode_config *config;
-
-	if (!state)
-		return;
-
-	dev = state->dev;
-	config = &dev->mode_config;
+	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
+	struct drm_mode_config *config = &state->dev->mode_config;
 
 	drm_atomic_state_clear(state);
 
@@ -243,7 +239,7 @@ void drm_atomic_state_free(struct drm_atomic_state *state)
 		kfree(state);
 	}
 }
-EXPORT_SYMBOL(drm_atomic_state_free);
+EXPORT_SYMBOL(__drm_atomic_state_free);
 
 /**
  * drm_atomic_get_crtc_state - get crtc state
@@ -709,7 +705,9 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
 		state->src_w = val;
 	} else if (property == config->prop_src_h) {
 		state->src_h = val;
-	} else if (property == config->rotation_property) {
+	} else if (property == plane->rotation_property) {
+		if (!is_power_of_2(val & DRM_ROTATE_MASK))
+			return -EINVAL;
 		state->rotation = val;
 	} else if (property == plane->zpos_property) {
 		state->zpos = val;
@@ -767,7 +765,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
 		*val = state->src_w;
 	} else if (property == config->prop_src_h) {
 		*val = state->src_h;
-	} else if (property == config->rotation_property) {
+	} else if (property == plane->rotation_property) {
 		*val = state->rotation;
 	} else if (property == plane->zpos_property) {
 		*val = state->zpos;
@@ -1465,7 +1463,7 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
 
 static struct drm_pending_vblank_event *create_vblank_event(
 		struct drm_device *dev, struct drm_file *file_priv,
-		struct fence *fence, uint64_t user_data)
+		struct dma_fence *fence, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
 	int ret;
@@ -1742,7 +1740,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		/*
 		 * Unlike commit, check_only does not clean up state.
-		 * Below we call drm_atomic_state_free for it.
+		 * Below we call drm_atomic_state_put for it.
 		 */
 		ret = drm_atomic_check_only(state);
 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
@@ -1775,8 +1773,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 		goto retry;
 	}
 
-	if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
-		drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
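With kref_init() in drm_atomic_state_init() and __drm_atomic_state_free() as the release callback, the get/put entry points are presumably thin inline wrappers in the header; a sketch consistent with the conversion above:

	static inline void drm_atomic_state_get(struct drm_atomic_state *state)
	{
		kref_get(&state->ref);
	}

	static inline void drm_atomic_state_put(struct drm_atomic_state *state)
	{
		kref_put(&state->ref, __drm_atomic_state_free);
	}

This is why the ioctl above now calls drm_atomic_state_put() unconditionally: the commit machinery takes its own reference, so the caller's reference is dropped on every path rather than only for failures and TEST_ONLY commits.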
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c3f8347..75ad01d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,7 +30,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "drm_crtc_internal.h"
 
@@ -458,10 +458,11 @@ mode_fixup(struct drm_atomic_state *state)
  * removed from the crtc.
  * crtc_state->active_changed is set when crtc_state->active changes,
  * which is used for dpms.
+ * See also: drm_atomic_crtc_needs_modeset()
  *
  * IMPORTANT:
  *
- * Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
+ * Drivers which set ->mode_changed (e.g. in their ->atomic_check hooks if a
  * plane update can't be done without a full modeset) _must_ call this function
 * after that change. It is permitted to call this function multiple
  * times for the same update, e.g. when the ->atomic_check functions depend upon
@@ -510,9 +511,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 
 	for_each_connector_in_state(state, connector, connector_state, i) {
 		/*
-		 * This only sets crtc->mode_changed for routing changes,
-		 * drivers must set crtc->mode_changed themselves when connector
-		 * properties need to be updated.
+		 * This only sets crtc->connectors_changed for routing changes,
+		 * drivers must set crtc->connectors_changed themselves when
+		 * connector properties need to be updated.
 		 */
 		ret = update_connector_routing(state, connector,
 					       connector_state);
@@ -1016,7 +1017,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
  * drm_atomic_helper_swap_state() so it uses the current plane state (and
  * just uses the atomic state to find the changed planes)
  *
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Returns zero on success or < 0 if dma_fence_wait() fails.
  */
 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 				      struct drm_atomic_state *state,
@@ -1040,11 +1041,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 		 * still interrupt the operation. Instead of blocking until the
 		 * timer expires, make the wait interruptible.
 		 */
-		ret = fence_wait(plane_state->fence, pre_swap);
+		ret = dma_fence_wait(plane_state->fence, pre_swap);
 		if (ret)
 			return ret;
 
-		fence_put(plane_state->fence);
+		dma_fence_put(plane_state->fence);
 		plane_state->fence = NULL;
 	}
 
@@ -1207,7 +1208,7 @@ static void commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_cleanup_done(state);
 
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 }
 
 static void commit_work(struct work_struct *work)
@@ -1289,6 +1290,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 	 * make sure work items don't artificially stall on one another.
 	 */
 
+	drm_atomic_state_get(state);
 	if (nonblock)
 		queue_work(system_unbound_wq, &state->commit_work);
 	else
@@ -1590,7 +1592,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
  *
  * This signals completion of the atomic update @state, including any cleanup
  * work. If used, it must be called right before calling
- * drm_atomic_state_free().
+ * drm_atomic_state_put().
  *
  * This is part of the atomic helper support for nonblocking commits, see
  * drm_atomic_helper_setup_commit() for an overview.
@@ -2113,18 +2115,13 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane,
 		state->legacy_cursor_update = true;
 
 	ret = drm_atomic_commit(state);
-	if (ret != 0)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2186,18 +2183,13 @@ int drm_atomic_helper_disable_plane(struct drm_plane *plane)
 		goto fail;
 
 	ret = drm_atomic_commit(state);
-	if (ret != 0)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2326,18 +2318,13 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
 		goto fail;
 
 	ret = drm_atomic_commit(state);
-	if (ret != 0)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2412,7 +2399,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 	primary_state->crtc_h = vdisplay;
 	primary_state->src_x = set->x << 16;
 	primary_state->src_y = set->y << 16;
-	if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+	if (drm_rotation_90_or_270(primary_state->rotation)) {
 		primary_state->src_w = vdisplay << 16;
 		primary_state->src_h = hdisplay << 16;
 	} else {
@@ -2479,11 +2466,8 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
 	}
 
 	err = drm_atomic_commit(state);
-
 free:
-	if (err < 0)
-		drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return err;
 }
 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
@@ -2534,7 +2518,7 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
 
 	err = drm_atomic_helper_disable_all(dev, &ctx);
 	if (err < 0) {
-		drm_atomic_state_free(state);
+		drm_atomic_state_put(state);
 		state = ERR_PTR(err);
 		goto unlock;
 	}
@@ -2623,18 +2607,13 @@ drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
 		goto fail;
 
 	ret = drm_atomic_commit(state);
-	if (ret != 0)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2683,18 +2662,13 @@ drm_atomic_helper_plane_set_property(struct drm_plane *plane,
 		goto fail;
 
 	ret = drm_atomic_commit(state);
-	if (ret != 0)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2743,18 +2717,13 @@ drm_atomic_helper_connector_set_property(struct drm_connector *connector,
 		goto fail;
 
 	ret = drm_atomic_commit(state);
-	if (ret != 0)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2827,18 +2796,13 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
 	}
 
 	ret = drm_atomic_nonblocking_commit(state);
-	if (ret != 0)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2914,19 +2878,14 @@ int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
 	crtc_state->active = active;
 
 	ret = drm_atomic_commit(state);
-	if (ret != 0)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
 	connector->dpms = old_mode;
-	drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -3333,7 +3292,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
 
 free:
 	if (err < 0) {
-		drm_atomic_state_free(state);
+		drm_atomic_state_put(state);
 		state = ERR_PTR(err);
 	}
 
@@ -3448,22 +3407,14 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
 		goto fail;
 
 	ret = drm_atomic_commit(state);
-	if (ret)
-		goto fail;
-
-	/* Driver takes ownership of state on successful commit. */
-
-	drm_property_unreference_blob(blob);
-
-	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 	drm_property_unreference_blob(blob);
-
 	return ret;
+
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
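Every legacy helper above loses its "driver takes ownership of state on successful commit" special case: with reference counting the caller holds exactly one reference and drops it on every exit path. A minimal sketch of the resulting calling pattern, with locking and deadlock backoff elided and an illustrative function name:

	static int example_commit(struct drm_device *dev)
	{
		struct drm_atomic_state *state;
		int ret;

		state = drm_atomic_state_alloc(dev);	/* refcount starts at 1 */
		if (!state)
			return -ENOMEM;

		/* ... fill in plane/crtc/connector states ... */

		ret = drm_atomic_commit(state);	/* takes its own reference */
		drm_atomic_state_put(state);	/* always drop ours */
		return ret;
	}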
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 85172a9..1f2412c 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -89,7 +89,7 @@
  * On top of this basic transformation additional properties can be exposed by
  * the driver:
  *
- * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
  *   rotation and reflection step between the source and destination rectangles.
  *   Without this property the rectangle is only scaled, but not rotated or
  *   reflected.
@@ -105,18 +105,12 @@
  */
 
 /**
- * drm_mode_create_rotation_property - create a new rotation property
- * @dev: DRM device
+ * drm_plane_create_rotation_property - create a new rotation property
+ * @plane: drm plane
+ * @rotation: initial value of the rotation property
  * @supported_rotations: bitmask of supported rotations and reflections
  *
  * This creates a new property with the selected support for transformations.
- * The resulting property should be stored in @rotation_property in
- * &drm_mode_config. It then must be attached to each plane which supports
- * rotations using drm_object_attach_property().
- *
- * FIXME: Probably better if the rotation property is created on each plane,
- * like the zpos property. Otherwise it's not possible to allow different
- * rotation modes on different planes.
  *
 * Since a rotation by 180° is the same as reflecting both along the x
 * and the y axis, the rotation property is somewhat redundant. Drivers can use
@@ -144,8 +138,9 @@
  * rotation. After reflection, the rotation is applied to the image sampled from
  * the source rectangle, before scaling it to fit the destination rectangle.
  */
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-						       unsigned int supported_rotations)
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+				       unsigned int rotation,
+				       unsigned int supported_rotations)
 {
 	static const struct drm_prop_enum_list props[] = {
 		{ __builtin_ffs(DRM_ROTATE_0) - 1,   "rotate-0" },
@@ -155,12 +150,28 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
 		{ __builtin_ffs(DRM_REFLECT_X) - 1,  "reflect-x" },
 		{ __builtin_ffs(DRM_REFLECT_Y) - 1,  "reflect-y" },
 	};
+	struct drm_property *prop;
 
-	return drm_property_create_bitmask(dev, 0, "rotation",
+	WARN_ON((supported_rotations & DRM_ROTATE_MASK) == 0);
+	WARN_ON(!is_power_of_2(rotation & DRM_ROTATE_MASK));
+	WARN_ON(rotation & ~supported_rotations);
+
+	prop = drm_property_create_bitmask(plane->dev, 0, "rotation",
 					   props, ARRAY_SIZE(props),
 					   supported_rotations);
+	if (!prop)
+		return -ENOMEM;
+
+	drm_object_attach_property(&plane->base, prop, rotation);
+
+	if (plane->state)
+		plane->state->rotation = rotation;
+
+	plane->rotation_property = prop;
+
+	return 0;
 }
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
+EXPORT_SYMBOL(drm_plane_create_rotation_property);
 
 /**
  * drm_rotation_simplify() - Try to simplify the rotation
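Rotation now becomes a per-plane property created at plane initialization time. A sketch of a driver call site, assuming the hardware supports all rotations and reflections (the supported mask is driver policy, shown for illustration only):

	ret = drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
						 DRM_ROTATE_0 | DRM_ROTATE_90 |
						 DRM_ROTATE_180 | DRM_ROTATE_270 |
						 DRM_REFLECT_X | DRM_REFLECT_Y);
	if (ret)
		return ret;

The helper also attaches the property and seeds plane->state->rotation, so drivers no longer call drm_object_attach_property() themselves.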
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2d7bedf..13441e2 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -40,7 +40,7 @@
 #include <drm/drm_modeset_lock.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_auth.h>
-#include <drm/drm_framebuffer.h>
+#include <drm/drm_debugfs_crc.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
@@ -122,6 +122,10 @@ static int drm_crtc_register_all(struct drm_device *dev)
 	int ret = 0;
 
 	drm_for_each_crtc(crtc, dev) {
+		if (drm_debugfs_crtc_add(crtc))
+			DRM_ERROR("Failed to initialize debugfs entry for CRTC '%s'.\n",
+				  crtc->name);
+
 		if (crtc->funcs->late_register)
 			ret = crtc->funcs->late_register(crtc);
 		if (ret)
@@ -138,9 +142,29 @@ static void drm_crtc_unregister_all(struct drm_device *dev)
 	drm_for_each_crtc(crtc, dev) {
 		if (crtc->funcs->early_unregister)
 			crtc->funcs->early_unregister(crtc);
+		drm_debugfs_crtc_remove(crtc);
 	}
 }
 
+static int drm_crtc_crc_init(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+	spin_lock_init(&crtc->crc.lock);
+	init_waitqueue_head(&crtc->crc.wq);
+	crtc->crc.source = kstrdup("auto", GFP_KERNEL);
+	if (!crtc->crc.source)
+		return -ENOMEM;
+#endif
+	return 0;
+}
+
+static void drm_crtc_crc_fini(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+	kfree(crtc->crc.source);
+#endif
+}
+
 /**
  * drm_crtc_init_with_planes - Initialise a new CRTC object with
  *    specified primary and cursor planes.
@@ -210,6 +234,12 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 	if (cursor)
 		cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
 
+	ret = drm_crtc_crc_init(crtc);
+	if (ret) {
+		drm_mode_object_unregister(dev, &crtc->base);
+		return ret;
+	}
+
 	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 		drm_object_attach_property(&crtc->base, config->prop_active, 0);
 		drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
@@ -236,6 +266,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 	 * the indices on the drm_crtc after us in the crtc_list.
 	 */
 
+	drm_crtc_crc_fini(crtc);
+
 	kfree(crtc->gamma_store);
 	crtc->gamma_store = NULL;
 
@@ -695,8 +727,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
 	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
 
 	if (crtc->state &&
-	    crtc->primary->state->rotation & (DRM_ROTATE_90 |
-					      DRM_ROTATE_270))
+	    drm_rotation_90_or_270(crtc->primary->state->rotation))
 		swap(hdisplay, vdisplay);
 
 	return drm_framebuffer_check_src_coords(x << 16, y << 16,
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 1205790..800055c 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -415,5 +415,37 @@ void drm_debugfs_connector_remove(struct drm_connector *connector)
 	connector->debugfs_entry = NULL;
 }
 
-#endif /* CONFIG_DEBUG_FS */
+int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+	struct drm_minor *minor = crtc->dev->primary;
+	struct dentry *root;
+	char *name;
 
+	name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
+	if (!name)
+		return -ENOMEM;
+
+	root = debugfs_create_dir(name, minor->debugfs_root);
+	kfree(name);
+	if (!root)
+		return -ENOMEM;
+
+	crtc->debugfs_entry = root;
+
+	if (drm_debugfs_crtc_crc_add(crtc))
+		goto error;
+
+	return 0;
+
+error:
+	drm_debugfs_crtc_remove(crtc);
+	return -ENOMEM;
+}
+
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+	debugfs_remove_recursive(crtc->debugfs_entry);
+	crtc->debugfs_entry = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
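The per-CRTC debugfs directory registered above hosts the CRC files added in the new drm_debugfs_crc.c below, so capturing CRCs from userspace is plain file I/O. A hedged sketch, assuming debugfs mounted at /sys/kernel/debug, DRM minor 0 and CRTC index 0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char line[128];
		ssize_t n;
		int fd;

		fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
		if (fd < 0 || write(fd, "auto", 4) < 0)
			return 1;	/* let the driver pick the CRC source */
		close(fd);

		fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
		if (fd < 0)
			return 1;	/* opening "data" starts CRC generation */

		n = read(fd, line, sizeof(line) - 1);	/* blocks for a frame */
		if (n > 0) {
			line[n] = '\0';
			printf("%s", line);	/* "<frame> 0x<crc> ...\n" */
		}
		close(fd);	/* closing stops CRC generation */
		return 0;
	}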
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
new file mode 100644
index 0000000..00e771f
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * Copyright © 2016 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Based on code from the i915 driver.
+ * Original author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <drm/drmP.h>
+#include "drm_internal.h"
+
+/**
+ * DOC: CRC ABI
+ *
+ * DRM device drivers can provide userspace with CRC information for each frame
+ * as it reaches a given hardware component (a "source").
+ *
+ * Userspace can control generation of CRCs in a given CRTC by writing to the
+ * file dri/0/crtc-N/crc/control in debugfs, with N being the index of the CRTC.
+ * Accepted values are source names (which are driver-specific) and the "auto"
+ * keyword, which will let the driver select a default source of frame CRCs
+ * for this CRTC.
+ *
+ * Once frame CRC generation is enabled, userspace can capture them by reading
+ * the dri/0/crtc-N/crc/data file. Each line in that file contains the frame
+ * number in the first field and then a number of unsigned integer fields
+ * containing the CRC data. Fields are separated by a single space and the number
+ * of CRC fields is source-specific.
+ *
+ * Note that although in some cases the CRC is computed in a specified way on
+ * the frame contents as supplied by userspace (eDP 1.3), in general both the
+ * CRC computation and the prior processing of the frame contents are
+ * unspecified, so userspace cannot rely on being able to generate matching CRC
+ * values for the frame contents that it submits. In this general case, the
+ * most userspace can do is compare the reported CRCs of frames that should
+ * have the same contents.
+ */
+
+static int crc_control_show(struct seq_file *m, void *data)
+{
+	struct drm_crtc *crtc = m->private;
+
+	seq_printf(m, "%s\n", crtc->crc.source);
+
+	return 0;
+}
+
+static int crc_control_open(struct inode *inode, struct file *file)
+{
+	struct drm_crtc *crtc = inode->i_private;
+
+	return single_open(file, crc_control_show, crtc);
+}
+
+static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
+				 size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_crtc *crtc = m->private;
+	struct drm_crtc_crc *crc = &crtc->crc;
+	char *source;
+
+	if (len == 0)
+		return 0;
+
+	if (len > PAGE_SIZE - 1) {
+		DRM_DEBUG_KMS("Expected < %lu bytes into crtc crc control\n",
+			      PAGE_SIZE);
+		return -E2BIG;
+	}
+
+	source = memdup_user_nul(ubuf, len);
+	if (IS_ERR(source))
+		return PTR_ERR(source);
+
+	/* memdup_user_nul() terminates at source[len], so check the last byte */
+	if (source[len - 1] == '\n')
+		source[len - 1] = '\0';
+
+	spin_lock_irq(&crc->lock);
+
+	if (crc->opened) {
+		spin_unlock_irq(&crc->lock);
+		kfree(source);
+		return -EBUSY;
+	}
+
+	kfree(crc->source);
+	crc->source = source;
+
+	spin_unlock_irq(&crc->lock);
+
+	*offp += len;
+	return len;
+}
+
+static const struct file_operations drm_crtc_crc_control_fops = {
+	.owner = THIS_MODULE,
+	.open = crc_control_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = crc_control_write
+};
+
+static int crtc_crc_open(struct inode *inode, struct file *filep)
+{
+	struct drm_crtc *crtc = inode->i_private;
+	struct drm_crtc_crc *crc = &crtc->crc;
+	struct drm_crtc_crc_entry *entries = NULL;
+	size_t values_cnt;
+	int ret;
+
+	if (crc->opened)
+		return -EBUSY;
+
+	ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
+		ret = -EINVAL;
+		goto err_disable;
+	}
+
+	if (WARN_ON(values_cnt == 0)) {
+		ret = -EINVAL;
+		goto err_disable;
+	}
+
+	entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
+	if (!entries) {
+		ret = -ENOMEM;
+		goto err_disable;
+	}
+
+	spin_lock_irq(&crc->lock);
+	crc->entries = entries;
+	crc->values_cnt = values_cnt;
+	crc->opened = true;
+	spin_unlock_irq(&crc->lock);
+
+	return 0;
+
+err_disable:
+	crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+	return ret;
+}
+
+static int crtc_crc_release(struct inode *inode, struct file *filep)
+{
+	struct drm_crtc *crtc = filep->f_inode->i_private;
+	struct drm_crtc_crc *crc = &crtc->crc;
+	size_t values_cnt;
+
+	spin_lock_irq(&crc->lock);
+	kfree(crc->entries);
+	crc->entries = NULL;
+	crc->head = 0;
+	crc->tail = 0;
+	crc->values_cnt = 0;
+	crc->opened = false;
+	spin_unlock_irq(&crc->lock);
+
+	crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+
+	return 0;
+}
+
+static int crtc_crc_data_count(struct drm_crtc_crc *crc)
+{
+	assert_spin_locked(&crc->lock);
+	return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
+}
+
+/*
+ * One frame field of 10 chars, plus a number of space-separated CRC fields of
+ * 10 chars each, with a newline at the end and NUL-terminated.
+ */
+#define LINE_LEN(values_cnt)	(10 + 11 * (values_cnt) + 1 + 1)
+#define MAX_LINE_LEN		(LINE_LEN(DRM_MAX_CRC_NR))
+
+static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
+			     size_t count, loff_t *pos)
+{
+	struct drm_crtc *crtc = filep->f_inode->i_private;
+	struct drm_crtc_crc *crc = &crtc->crc;
+	struct drm_crtc_crc_entry *entry;
+	char buf[MAX_LINE_LEN];
+	int ret, i;
+
+	spin_lock_irq(&crc->lock);
+
+	if (!crc->source) {
+		spin_unlock_irq(&crc->lock);
+		return 0;
+	}
+
+	/* Nothing to read? */
+	while (crtc_crc_data_count(crc) == 0) {
+		if (filep->f_flags & O_NONBLOCK) {
+			spin_unlock_irq(&crc->lock);
+			return -EAGAIN;
+		}
+
+		ret = wait_event_interruptible_lock_irq(crc->wq,
+							crtc_crc_data_count(crc),
+							crc->lock);
+		if (ret) {
+			spin_unlock_irq(&crc->lock);
+			return ret;
+		}
+	}
+
+	/* We know we have an entry to be read */
+	entry = &crc->entries[crc->tail];
+
+	if (count < LINE_LEN(crc->values_cnt)) {
+		spin_unlock_irq(&crc->lock);
+		return -EINVAL;
+	}
+
+	BUILD_BUG_ON_NOT_POWER_OF_2(DRM_CRC_ENTRIES_NR);
+	crc->tail = (crc->tail + 1) & (DRM_CRC_ENTRIES_NR - 1);
+
+	spin_unlock_irq(&crc->lock);
+
+	if (entry->has_frame_counter)
+		sprintf(buf, "0x%08x", entry->frame);
+	else
+		sprintf(buf, "XXXXXXXXXX");
+
+	for (i = 0; i < crc->values_cnt; i++)
+		sprintf(buf + 10 + i * 11, " 0x%08x", entry->crcs[i]);
+	sprintf(buf + 10 + crc->values_cnt * 11, "\n");
+
+	if (copy_to_user(user_buf, buf, LINE_LEN(crc->values_cnt)))
+		return -EFAULT;
+
+	return LINE_LEN(crc->values_cnt);
+}
+
+static const struct file_operations drm_crtc_crc_data_fops = {
+	.owner = THIS_MODULE,
+	.open = crtc_crc_open,
+	.read = crtc_crc_read,
+	.release = crtc_crc_release,
+};
+
+/**
+ * drm_debugfs_crtc_crc_add - Add files to debugfs for capture of frame CRCs
+ * @crtc: CRTC to whom the frames will belong
+ *
+ * Adds files to debugfs directory that allows userspace to control the
+ * generation of frame CRCs and to read them.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+	struct dentry *crc_ent, *ent;
+
+	if (!crtc->funcs->set_crc_source)
+		return 0;
+
+	crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
+	if (!crc_ent)
+		return -ENOMEM;
+
+	ent = debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+				  &drm_crtc_crc_control_fops);
+	if (!ent)
+		goto error;
+
+	ent = debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
+				  &drm_crtc_crc_data_fops);
+	if (!ent)
+		goto error;
+
+	return 0;
+
+error:
+	debugfs_remove_recursive(crc_ent);
+
+	return -ENOMEM;
+}
+
+/**
+ * drm_crtc_add_crc_entry - Add entry with CRC information for a frame
+ * @crtc: CRTC to which the frame belongs
+ * @has_frame: whether this entry has a frame number to go with it
+ * @frame: number of the frame these CRCs are about
+ * @crcs: array of CRC values, with length matching #drm_crtc_crc.values_cnt
+ *
+ * For each frame, the driver polls the source of CRCs for new data and calls
+ * this function to add them to the buffer from which userspace reads.
+ */
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+			   uint32_t frame, uint32_t *crcs)
+{
+	struct drm_crtc_crc *crc = &crtc->crc;
+	struct drm_crtc_crc_entry *entry;
+	int head, tail;
+
+	assert_spin_locked(&crc->lock);
+
+	/* Caller may not have noticed yet that userspace has stopped reading */
+	if (!crc->opened)
+		return -EINVAL;
+
+	head = crc->head;
+	tail = crc->tail;
+
+	if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+		DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+		return -ENOBUFS;
+	}
+
+	entry = &crc->entries[head];
+	entry->frame = frame;
+	entry->has_frame_counter = has_frame;
+	memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);
+
+	head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
+	crc->head = head;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
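Per the assert_spin_locked() above, drivers add entries with crc->lock held, typically from a vblank or frame-done interrupt, and then wake the queue that crtc_crc_read() sleeps on. A hedged sketch of such a producer; everything except the drm_crtc_crc fields and drm_crtc_add_crc_entry() is illustrative:

	static void example_report_crc(struct drm_crtc *crtc, u32 frame, u32 val)
	{
		struct drm_crtc_crc *crc = &crtc->crc;
		uint32_t crcs[1] = { val };	/* length must match values_cnt */
		unsigned long flags;

		spin_lock_irqsave(&crc->lock, flags);
		if (!drm_crtc_add_crc_entry(crtc, true, frame, crcs))
			wake_up_interruptible(&crc->wq);
		spin_unlock_irqrestore(&crc->lock, flags);
	}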
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a75..e025639 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -142,12 +142,25 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
 		      sizeof(dp_dual_mode_hdmi_id)) == 0;
 }
 
+static bool is_type1_adaptor(uint8_t adaptor_id)
+{
+	return adaptor_id == 0 || adaptor_id == 0xff;
+}
+
 static bool is_type2_adaptor(uint8_t adaptor_id)
 {
 	return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
 			      DP_DUAL_MODE_REV_TYPE2);
 }
 
+static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
+			      const uint8_t adaptor_id)
+{
+	return is_hdmi_adaptor(hdmi_id) &&
+		(adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
+		 DP_DUAL_MODE_TYPE_HAS_DPCD));
+}
+
 /**
  * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
  * @adapter: I2C adapter for the DDC bus
@@ -185,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
 	 */
 	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
 				    hdmi_id, sizeof(hdmi_id));
+	DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
+		      ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
 	if (ret)
 		return DRM_DP_DUAL_MODE_UNKNOWN;
 
@@ -202,13 +217,26 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
 	 */
 	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
 				    &adaptor_id, sizeof(adaptor_id));
+	DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
+		      adaptor_id, ret);
 	if (ret == 0) {
+		if (is_lspcon_adaptor(hdmi_id, adaptor_id))
+			return DRM_DP_DUAL_MODE_LSPCON;
 		if (is_type2_adaptor(adaptor_id)) {
 			if (is_hdmi_adaptor(hdmi_id))
 				return DRM_DP_DUAL_MODE_TYPE2_HDMI;
 			else
 				return DRM_DP_DUAL_MODE_TYPE2_DVI;
 		}
+		/*
+		 * If this is neither a proper type 1 ID nor a broken type 1
+		 * adaptor as described above, assume type 1, but let the user
+		 * know that we may have misdetected the type.
+		 */
+		if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+			DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
+				  adaptor_id);
+
 	}
 
 	if (is_hdmi_adaptor(hdmi_id))
@@ -364,3 +392,96 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
 	}
 }
 EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
+
+/**
+ * drm_lspcon_get_mode - get LSPCON's current mode of operation by
+ * reading offset (0x80, 0x41)
+ * @adapter: I2C-over-aux adapter
+ * @mode: output variable for the current LSPCON mode of operation
+ *
+ * Returns:
+ * 0 on success, with @mode set to the current mode of operation;
+ * negative error code on failure
+ */
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+			enum drm_lspcon_mode *mode)
+{
+	u8 data;
+	int ret = 0;
+
+	if (!mode) {
+		DRM_ERROR("NULL input\n");
+		return -EINVAL;
+	}
+
+	/* Read Status: i2c over aux */
+	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+				    &data, sizeof(data));
+	if (ret < 0) {
+		DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+		return -EFAULT;
+	}
+
+	if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
+		*mode = DRM_LSPCON_MODE_PCON;
+	else
+		*mode = DRM_LSPCON_MODE_LS;
+	return 0;
+}
+EXPORT_SYMBOL(drm_lspcon_get_mode);
+
+/**
+ * drm_lspcon_set_mode - change LSPCON's mode of operation by
+ * writing offset (0x80, 0x40)
+ * @adapter: I2C-over-aux adapter
+ * @mode: required mode of operation
+ *
+ * Returns:
+ * 0 on success; negative error code on failure or timeout
+ */
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+			enum drm_lspcon_mode mode)
+{
+	u8 data = 0;
+	int ret;
+	int time_out = 200;
+	enum drm_lspcon_mode current_mode;
+
+	if (mode == DRM_LSPCON_MODE_PCON)
+		data = DP_DUAL_MODE_LSPCON_MODE_PCON;
+
+	/* Change mode */
+	ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
+				     &data, sizeof(data));
+	if (ret < 0) {
+		DRM_ERROR("LSPCON mode change failed\n");
+		return ret;
+	}
+
+	/*
+	 * Confirm mode change by reading the status bit.
+	 * Sometimes, it takes a while to change the mode,
+	 * so wait and retry until time out or done.
+	 */
+	do {
+		ret = drm_lspcon_get_mode(adapter, &current_mode);
+		if (ret) {
+			DRM_ERROR("can't confirm LSPCON mode change\n");
+			return ret;
+		}
+
+		if (current_mode == mode) {
+			DRM_DEBUG_KMS("LSPCON mode changed to %s\n",
+				      mode == DRM_LSPCON_MODE_LS ?
+				      "LS" : "PCON");
+			return 0;
+		}
+
+		msleep(10);
+		time_out -= 10;
+	} while (time_out);
+
+	DRM_ERROR("LSPCON mode change timed out\n");
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(drm_lspcon_set_mode);
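Taken together, a caller that wants the adaptor in PCON mode would do something like the following, assuming DRM_DP_DUAL_MODE_LSPCON was already detected on the DDC adapter (variable names are illustrative):

	enum drm_lspcon_mode mode;
	int ret;

	ret = drm_lspcon_set_mode(ddc_adapter, DRM_LSPCON_MODE_PCON);
	if (ret)
		return ret;	/* write failed or confirmation timed out */

	/* set_mode already polls for the switch; re-reading is optional */
	ret = drm_lspcon_get_mode(ddc_adapter, &mode);
	if (!ret && mode != DRM_LSPCON_MODE_PCON)
		ret = -EIO;
	return ret;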
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ec77bd3..1801e9c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
 	return ret == xfers ? 0 : -1;
 }
 
+static void connector_bad_edid(struct drm_connector *connector,
+			       u8 *edid, int num_blocks)
+{
+	int i;
+
+	if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+		return;
+
+	dev_warn(connector->dev->dev,
+		 "%s: EDID is invalid:\n",
+		 connector->name);
+	for (i = 0; i < num_blocks; i++) {
+		u8 *block = edid + i * EDID_LENGTH;
+		char prefix[20];
+
+		if (drm_edid_is_zero(block, EDID_LENGTH))
+			sprintf(prefix, "\t[%02x] ZERO ", i);
+		else if (!drm_edid_block_valid(block, i, false, NULL))
+			sprintf(prefix, "\t[%02x] BAD  ", i);
+		else
+			sprintf(prefix, "\t[%02x] GOOD ", i);
+
+		print_hex_dump(KERN_WARNING,
+			       prefix, DUMP_PREFIX_NONE, 16, 1,
+			       block, EDID_LENGTH, false);
+	}
+}
+
 /**
  * drm_do_get_edid - get EDID data using a custom EDID block read function
  * @connector: connector we're probing
@@ -1282,20 +1310,19 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
 	void *data)
 {
 	int i, j = 0, valid_extensions = 0;
-	u8 *block, *new;
-	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+	u8 *edid, *new;
 
-	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+	if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
 		return NULL;
 
 	/* base block fetch */
 	for (i = 0; i < 4; i++) {
-		if (get_edid_block(data, block, 0, EDID_LENGTH))
+		if (get_edid_block(data, edid, 0, EDID_LENGTH))
 			goto out;
-		if (drm_edid_block_valid(block, 0, print_bad_edid,
+		if (drm_edid_block_valid(edid, 0, false,
 					 &connector->edid_corrupt))
 			break;
-		if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+		if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
 			connector->null_edid_counter++;
 			goto carp;
 		}
@@ -1304,58 +1331,62 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
 		goto carp;
 
 	/* if there's no extensions, we're done */
-	if (block[0x7e] == 0)
-		return (struct edid *)block;
+	valid_extensions = edid[0x7e];
+	if (valid_extensions == 0)
+		return (struct edid *)edid;
 
-	new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+	new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
 	if (!new)
 		goto out;
-	block = new;
+	edid = new;
 
-	for (j = 1; j <= block[0x7e]; j++) {
+	for (j = 1; j <= edid[0x7e]; j++) {
+		u8 *block = edid + j * EDID_LENGTH;
+
 		for (i = 0; i < 4; i++) {
-			if (get_edid_block(data,
-				  block + (valid_extensions + 1) * EDID_LENGTH,
-				  j, EDID_LENGTH))
+			if (get_edid_block(data, block, j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + (valid_extensions + 1)
-						 * EDID_LENGTH, j,
-						 print_bad_edid,
-						 NULL)) {
-				valid_extensions++;
+			if (drm_edid_block_valid(block, j, false, NULL))
 				break;
-			}
 		}
 
-		if (i == 4 && print_bad_edid) {
-			dev_warn(connector->dev->dev,
-			 "%s: Ignoring invalid EDID block %d.\n",
-			 connector->name, j);
-
-			connector->bad_edid_counter++;
-		}
+		if (i == 4)
+			valid_extensions--;
 	}
 
-	if (valid_extensions != block[0x7e]) {
-		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
-		block[0x7e] = valid_extensions;
-		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+	if (valid_extensions != edid[0x7e]) {
+		u8 *base;
+
+		connector_bad_edid(connector, edid, edid[0x7e] + 1);
+
+		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
+		edid[0x7e] = valid_extensions;
+
+		new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
 		if (!new)
 			goto out;
-		block = new;
+
+		base = new;
+		for (i = 0; i <= edid[0x7e]; i++) {
+			u8 *block = edid + i * EDID_LENGTH;
+
+			if (!drm_edid_block_valid(block, i, false, NULL))
+				continue;
+
+			memcpy(base, block, EDID_LENGTH);
+			base += EDID_LENGTH;
+		}
+
+		kfree(edid);
+		edid = new;
 	}
 
-	return (struct edid *)block;
+	return (struct edid *)edid;
 
 carp:
-	if (print_bad_edid) {
-		dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
-			 connector->name, j);
-	}
-	connector->bad_edid_counter++;
-
+	connector_bad_edid(connector, edid, 1);
 out:
-	kfree(block);
+	kfree(edid);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(drm_do_get_edid);
@@ -3580,32 +3611,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_av_sync_delay);
 
 /**
- * drm_select_eld - select one ELD from multiple HDMI/DP sinks
- * @encoder: the encoder just changed display mode
- *
- * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
- * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
- *
- * Return: The connector associated with the first HDMI/DP sink that has ELD
- * attached to it.
- */
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
-{
-	struct drm_connector *connector;
-	struct drm_device *dev = encoder->dev;
-
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
-	drm_for_each_connector(connector, dev)
-		if (connector->encoder == encoder && connector->eld[0])
-			return connector;
-
-	return NULL;
-}
-EXPORT_SYMBOL(drm_select_eld);
-
-/**
  * drm_detect_hdmi_monitor - detect whether monitor is HDMI
  * @edid: monitor EDID information
  *
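drm_do_get_edid() leaves the transport to the caller through the get_edid_block callback, so the validation and compaction rework above applies to any EDID source. A sketch of a driver-provided callback; the example_* names and the flat-buffer read are assumptions for illustration:

	static int example_get_edid_block(void *data, u8 *buf,
					  unsigned int block, size_t len)
	{
		struct example_encoder *enc = data;

		/* Read @len bytes of EDID block @block into @buf; a non-zero
		 * return makes drm_do_get_edid() treat the attempt as failed. */
		return example_read_edid(enc, block * EDID_LENGTH, buf, len);
	}

	edid = drm_do_get_edid(connector, example_get_edid_block, enc);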
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 1fd6eac..4c66644 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -176,20 +176,20 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
 	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
 	const struct drm_framebuffer_funcs *funcs)
 {
+	const struct drm_format_info *info;
 	struct drm_fb_cma *fb_cma;
 	struct drm_gem_cma_object *objs[4];
 	struct drm_gem_object *obj;
-	unsigned int hsub;
-	unsigned int vsub;
 	int ret;
 	int i;
 
-	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
-	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+	info = drm_format_info(mode_cmd->pixel_format);
+	if (!info)
+		return ERR_PTR(-EINVAL);
 
-	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
-		unsigned int width = mode_cmd->width / (i ? hsub : 1);
-		unsigned int height = mode_cmd->height / (i ? vsub : 1);
+	for (i = 0; i < info->num_planes; i++) {
+		unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+		unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
 		unsigned int min_size;
 
 		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
@@ -200,7 +200,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
 		}
 
 		min_size = (height - 1) * mode_cmd->pitches[i]
-			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+			 + width * info->cpp[i]
 			 + mode_cmd->offsets[i];
 
 		if (obj->size < min_size) {
@@ -269,12 +269,15 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
 static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
 	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	const struct drm_format_info *info;
+	int i;
 
 	seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
 			(char *)&fb->pixel_format);
 
-	for (i = 0; i < n; i++) {
+	info = drm_format_info(fb->pixel_format);
+
+	for (i = 0; i < info->num_planes; i++) {
 		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
 				i, fb->offsets[i], fb->pitches[i]);
 		drm_gem_cma_describe(fb_cma->obj[i], m);
@@ -557,7 +560,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
 {
 	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
-	drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
+	if (fbdev_cma->fb_helper.fbdev)
+		drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
 	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
 
 	if (fbdev_cma->fb) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 03414bd..83dbae0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -367,9 +367,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	if (ret != 0)
-		drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
 
 backoff:
@@ -394,11 +392,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 		if (plane->type != DRM_PLANE_TYPE_PRIMARY)
 			drm_plane_force_disable(plane);
 
-		if (dev->mode_config.rotation_property) {
+		if (plane->rotation_property)
 			drm_mode_plane_set_obj_prop(plane,
-						    dev->mode_config.rotation_property,
+						    plane->rotation_property,
 						    DRM_ROTATE_0);
-		}
 	}
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -1211,11 +1208,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 	if (var->pixclock != 0 || in_dbg_master())
 		return -EINVAL;
 
-	/* Need to resize the fb object !!! */
-	if (var->bits_per_pixel > fb->bits_per_pixel ||
-	    var->xres > fb->width || var->yres > fb->height ||
-	    var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
-		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+	/*
+	 * Changes to struct fb_var_screeninfo are currently not pushed back
+	 * to KMS, hence fail if different settings are requested.
+	 */
+	if (var->bits_per_pixel != fb->bits_per_pixel ||
+	    var->xres != fb->width || var->yres != fb->height ||
+	    var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
+		DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
 			  "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
 			  var->xres, var->yres, var->bits_per_pixel,
 			  var->xres_virtual, var->yres_virtual,
@@ -1361,16 +1361,13 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
 	info->var.xoffset = var->xoffset;
 	info->var.yoffset = var->yoffset;
 
-
 fail:
 	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret == -EDEADLK)
 		goto backoff;
 
-	if (ret != 0)
-		drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 	return ret;
 
 backoff:
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e84faec..cf993db 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -663,6 +663,10 @@ void drm_event_cancel_free(struct drm_device *dev,
 		list_del(&p->pending_link);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (p->fence)
+		dma_fence_put(p->fence);
+
 	kfree(p);
 }
 EXPORT_SYMBOL(drm_event_cancel_free);
@@ -692,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
 	}
 
 	if (e->fence) {
-		fence_signal(e->fence);
-		fence_put(e->fence);
+		dma_fence_signal(e->fence);
+		dma_fence_put(e->fence);
 	}
 
 	if (!e->file_priv) {
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 29c56b4..cbb8b77 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -102,83 +102,105 @@ char *drm_get_format_name(uint32_t format)
 }
 EXPORT_SYMBOL(drm_get_format_name);
 
-/**
- * drm_fb_get_bpp_depth - get the bpp/depth values for format
- * @format: pixel format (DRM_FORMAT_*)
- * @depth: storage for the depth value
- * @bpp: storage for the bpp value
- *
- * This only supports RGB formats here for compat with code that doesn't use
- * pixel formats directly yet.
+/*
+ * Internal function to query information for a given format. See
+ * drm_format_info() for the public API.
  */
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
-			  int *bpp)
+const struct drm_format_info *__drm_format_info(u32 format)
 {
-	char *format_name;
+	static const struct drm_format_info formats[] = {
+		{ .format = DRM_FORMAT_C8,		.depth = 8,  .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGB332,		.depth = 8,  .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGR233,		.depth = 8,  .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_XRGB4444,	.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_XBGR4444,	.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGBX4444,	.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGRX4444,	.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_ARGB4444,	.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_ABGR4444,	.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGBA4444,	.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGRA4444,	.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_XRGB1555,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_XBGR1555,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGBX5551,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGRX5551,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_ARGB1555,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_ABGR1555,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGBA5551,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGRA5551,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGB565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGR565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGB888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGR888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_XRGB8888,	.depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_XBGR8888,	.depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGBX8888,	.depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGRX8888,	.depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_XRGB2101010,	.depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_XBGR2101010,	.depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGBX1010102,	.depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGRX1010102,	.depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_ARGB2101010,	.depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_ABGR2101010,	.depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGBA1010102,	.depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGRA1010102,	.depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_ARGB8888,	.depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_ABGR8888,	.depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGBA8888,	.depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_BGRA8888,	.depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_YUV410,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+		{ .format = DRM_FORMAT_YVU410,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+		{ .format = DRM_FORMAT_YUV411,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+		{ .format = DRM_FORMAT_YVU411,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+		{ .format = DRM_FORMAT_YUV420,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+		{ .format = DRM_FORMAT_YVU420,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+		{ .format = DRM_FORMAT_YUV422,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+		{ .format = DRM_FORMAT_YVU422,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+		{ .format = DRM_FORMAT_YUV444,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_YVU444,		.depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_NV12,		.depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+		{ .format = DRM_FORMAT_NV21,		.depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+		{ .format = DRM_FORMAT_NV16,		.depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+		{ .format = DRM_FORMAT_NV61,		.depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+		{ .format = DRM_FORMAT_NV24,		.depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_NV42,		.depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_YUYV,		.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+		{ .format = DRM_FORMAT_YVYU,		.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+		{ .format = DRM_FORMAT_UYVY,		.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+		{ .format = DRM_FORMAT_VYUY,		.depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+		{ .format = DRM_FORMAT_AYUV,		.depth = 0,  .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+	};
 
-	switch (format) {
-	case DRM_FORMAT_C8:
-	case DRM_FORMAT_RGB332:
-	case DRM_FORMAT_BGR233:
-		*depth = 8;
-		*bpp = 8;
-		break;
-	case DRM_FORMAT_XRGB1555:
-	case DRM_FORMAT_XBGR1555:
-	case DRM_FORMAT_RGBX5551:
-	case DRM_FORMAT_BGRX5551:
-	case DRM_FORMAT_ARGB1555:
-	case DRM_FORMAT_ABGR1555:
-	case DRM_FORMAT_RGBA5551:
-	case DRM_FORMAT_BGRA5551:
-		*depth = 15;
-		*bpp = 16;
-		break;
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_BGR565:
-		*depth = 16;
-		*bpp = 16;
-		break;
-	case DRM_FORMAT_RGB888:
-	case DRM_FORMAT_BGR888:
-		*depth = 24;
-		*bpp = 24;
-		break;
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_RGBX8888:
-	case DRM_FORMAT_BGRX8888:
-		*depth = 24;
-		*bpp = 32;
-		break;
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_RGBX1010102:
-	case DRM_FORMAT_BGRX1010102:
-	case DRM_FORMAT_ARGB2101010:
-	case DRM_FORMAT_ABGR2101010:
-	case DRM_FORMAT_RGBA1010102:
-	case DRM_FORMAT_BGRA1010102:
-		*depth = 30;
-		*bpp = 32;
-		break;
-	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_RGBA8888:
-	case DRM_FORMAT_BGRA8888:
-		*depth = 32;
-		*bpp = 32;
-		break;
-	default:
-		format_name = drm_get_format_name(format);
-		DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
-		kfree(format_name);
-		*depth = 0;
-		*bpp = 0;
-		break;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+		if (formats[i].format == format)
+			return &formats[i];
 	}
+
+	return NULL;
 }
-EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_info - query information for a given format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * The caller should only pass a supported pixel format to this function.
+ * Unsupported pixel formats will generate a warning in the kernel log.
+ *
+ * Returns:
+ * The instance of struct drm_format_info that describes the pixel format, or
+ * NULL if the format is unsupported.
+ */
+const struct drm_format_info *drm_format_info(u32 format)
+{
+	const struct drm_format_info *info;
+
+	info = __drm_format_info(format);
+	WARN_ON(!info);
+	return info;
+}
+EXPORT_SYMBOL(drm_format_info);
 
 /**
  * drm_format_num_planes - get the number of planes for format
@@ -189,28 +211,10 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
  */
 int drm_format_num_planes(uint32_t format)
 {
-	switch (format) {
-	case DRM_FORMAT_YUV410:
-	case DRM_FORMAT_YVU410:
-	case DRM_FORMAT_YUV411:
-	case DRM_FORMAT_YVU411:
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-	case DRM_FORMAT_YUV422:
-	case DRM_FORMAT_YVU422:
-	case DRM_FORMAT_YUV444:
-	case DRM_FORMAT_YVU444:
-		return 3;
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV61:
-	case DRM_FORMAT_NV24:
-	case DRM_FORMAT_NV42:
-		return 2;
-	default:
-		return 1;
-	}
+	const struct drm_format_info *info;
+
+	info = drm_format_info(format);
+	return info ? info->num_planes : 1;
 }
 EXPORT_SYMBOL(drm_format_num_planes);
 
@@ -224,40 +228,13 @@ EXPORT_SYMBOL(drm_format_num_planes);
  */
 int drm_format_plane_cpp(uint32_t format, int plane)
 {
-	unsigned int depth;
-	int bpp;
+	const struct drm_format_info *info;
 
-	if (plane >= drm_format_num_planes(format))
+	info = drm_format_info(format);
+	if (!info || plane >= info->num_planes)
 		return 0;
 
-	switch (format) {
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_VYUY:
-		return 2;
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV61:
-	case DRM_FORMAT_NV24:
-	case DRM_FORMAT_NV42:
-		return plane ? 2 : 1;
-	case DRM_FORMAT_YUV410:
-	case DRM_FORMAT_YVU410:
-	case DRM_FORMAT_YUV411:
-	case DRM_FORMAT_YVU411:
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-	case DRM_FORMAT_YUV422:
-	case DRM_FORMAT_YVU422:
-	case DRM_FORMAT_YUV444:
-	case DRM_FORMAT_YVU444:
-		return 1;
-	default:
-		drm_fb_get_bpp_depth(format, &depth, &bpp);
-		return bpp >> 3;
-	}
+	return info->cpp[plane];
 }
 EXPORT_SYMBOL(drm_format_plane_cpp);
 
@@ -271,28 +248,10 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
  */
 int drm_format_horz_chroma_subsampling(uint32_t format)
 {
-	switch (format) {
-	case DRM_FORMAT_YUV411:
-	case DRM_FORMAT_YVU411:
-	case DRM_FORMAT_YUV410:
-	case DRM_FORMAT_YVU410:
-		return 4;
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_VYUY:
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV61:
-	case DRM_FORMAT_YUV422:
-	case DRM_FORMAT_YVU422:
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-		return 2;
-	default:
-		return 1;
-	}
+	const struct drm_format_info *info;
+
+	info = drm_format_info(format);
+	return info ? info->hsub : 1;
 }
 EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
 
@@ -306,18 +265,10 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
  */
 int drm_format_vert_chroma_subsampling(uint32_t format)
 {
-	switch (format) {
-	case DRM_FORMAT_YUV410:
-	case DRM_FORMAT_YVU410:
-		return 4;
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-		return 2;
-	default:
-		return 1;
-	}
+	const struct drm_format_info *info;
+
+	info = drm_format_info(format);
+	return info ? info->vsub : 1;
 }
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
 
@@ -332,13 +283,16 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
  */
 int drm_format_plane_width(int width, uint32_t format, int plane)
 {
-	if (plane >= drm_format_num_planes(format))
+	const struct drm_format_info *info;
+
+	info = drm_format_info(format);
+	if (!info || plane >= info->num_planes)
 		return 0;
 
 	if (plane == 0)
 		return width;
 
-	return width / drm_format_horz_chroma_subsampling(format);
+	return width / info->hsub;
 }
 EXPORT_SYMBOL(drm_format_plane_width);
 
@@ -353,12 +307,15 @@ EXPORT_SYMBOL(drm_format_plane_width);
  */
 int drm_format_plane_height(int height, uint32_t format, int plane)
 {
-	if (plane >= drm_format_num_planes(format))
+	const struct drm_format_info *info;
+
+	info = drm_format_info(format);
+	if (!info || plane >= info->num_planes)
 		return 0;
 
 	if (plane == 0)
 		return height;
 
-	return height / drm_format_vert_chroma_subsampling(format);
+	return height / info->vsub;
 }
 EXPORT_SYMBOL(drm_format_plane_height);
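For context, callers migrate to the new lookup by fetching the descriptor once and reading its fields instead of calling drm_fb_get_bpp_depth(). A minimal caller-side sketch (the helper name is hypothetical, not part of this series):

	#include <drm/drm_fourcc.h>

	static int example_pitch(u32 format, unsigned int width)
	{
		const struct drm_format_info *info = drm_format_info(format);

		/* drm_format_info() WARNs and returns NULL for unknown fourccs */
		if (!info)
			return -EINVAL;

		return width * info->cpp[0];	/* byte width of plane 0 */
	}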
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 398efd6..49fd7db 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -126,111 +126,33 @@ int drm_mode_addfb(struct drm_device *dev,
 	return 0;
 }
 
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
-	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
-	char *format_name;
-
-	switch (format) {
-	case DRM_FORMAT_C8:
-	case DRM_FORMAT_RGB332:
-	case DRM_FORMAT_BGR233:
-	case DRM_FORMAT_XRGB4444:
-	case DRM_FORMAT_XBGR4444:
-	case DRM_FORMAT_RGBX4444:
-	case DRM_FORMAT_BGRX4444:
-	case DRM_FORMAT_ARGB4444:
-	case DRM_FORMAT_ABGR4444:
-	case DRM_FORMAT_RGBA4444:
-	case DRM_FORMAT_BGRA4444:
-	case DRM_FORMAT_XRGB1555:
-	case DRM_FORMAT_XBGR1555:
-	case DRM_FORMAT_RGBX5551:
-	case DRM_FORMAT_BGRX5551:
-	case DRM_FORMAT_ARGB1555:
-	case DRM_FORMAT_ABGR1555:
-	case DRM_FORMAT_RGBA5551:
-	case DRM_FORMAT_BGRA5551:
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_BGR565:
-	case DRM_FORMAT_RGB888:
-	case DRM_FORMAT_BGR888:
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_RGBX8888:
-	case DRM_FORMAT_BGRX8888:
-	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_RGBA8888:
-	case DRM_FORMAT_BGRA8888:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_RGBX1010102:
-	case DRM_FORMAT_BGRX1010102:
-	case DRM_FORMAT_ARGB2101010:
-	case DRM_FORMAT_ABGR2101010:
-	case DRM_FORMAT_RGBA1010102:
-	case DRM_FORMAT_BGRA1010102:
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_VYUY:
-	case DRM_FORMAT_AYUV:
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV61:
-	case DRM_FORMAT_NV24:
-	case DRM_FORMAT_NV42:
-	case DRM_FORMAT_YUV410:
-	case DRM_FORMAT_YVU410:
-	case DRM_FORMAT_YUV411:
-	case DRM_FORMAT_YVU411:
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-	case DRM_FORMAT_YUV422:
-	case DRM_FORMAT_YVU422:
-	case DRM_FORMAT_YUV444:
-	case DRM_FORMAT_YVU444:
-		return 0;
-	default:
-		format_name = drm_get_format_name(r->pixel_format);
-		DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
-		kfree(format_name);
-		return -EINVAL;
-	}
-}
-
 static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 {
-	int ret, hsub, vsub, num_planes, i;
+	const struct drm_format_info *info;
+	int i;
 
-	ret = format_check(r);
-	if (ret) {
+	info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
+	if (!info) {
 		char *format_name = drm_get_format_name(r->pixel_format);
 		DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
 		kfree(format_name);
-		return ret;
+		return -EINVAL;
 	}
 
-	hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
-	vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
-	num_planes = drm_format_num_planes(r->pixel_format);
-
-	if (r->width == 0 || r->width % hsub) {
+	if (r->width == 0 || r->width % info->hsub) {
 		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
 		return -EINVAL;
 	}
 
-	if (r->height == 0 || r->height % vsub) {
+	if (r->height == 0 || r->height % info->vsub) {
 		DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
 		return -EINVAL;
 	}
 
-	for (i = 0; i < num_planes; i++) {
-		unsigned int width = r->width / (i != 0 ? hsub : 1);
-		unsigned int height = r->height / (i != 0 ? vsub : 1);
-		unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+	for (i = 0; i < info->num_planes; i++) {
+		unsigned int width = r->width / (i != 0 ? info->hsub : 1);
+		unsigned int height = r->height / (i != 0 ? info->vsub : 1);
+		unsigned int cpp = info->cpp[i];
 
 		if (!r->handles[i]) {
 			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
@@ -273,7 +195,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 		}
 	}
 
-	for (i = num_planes; i < 4; i++) {
+	for (i = info->num_planes; i < 4; i++) {
 		if (r->modifier[i]) {
 			DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
 			return -EINVAL;
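The per-plane geometry rule framebuffer_check() now derives straight from the descriptor: plane 0 keeps the full resolution, higher planes are chroma-subsampled, and each pitch must cover at least cpp * width bytes. A sketch of that minimum-pitch rule under those assumptions (names illustrative only):

	static int example_check_plane(const struct drm_mode_fb_cmd2 *r,
				       const struct drm_format_info *info,
				       int i)
	{
		unsigned int width = r->width / (i ? info->hsub : 1);
		unsigned int min_pitch = width * info->cpp[i];

		return r->pitches[i] < min_pitch ? -EINVAL : 0;
	}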
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e66af28..abd2098 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -100,6 +100,9 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 int drm_debugfs_cleanup(struct drm_minor *minor);
 int drm_debugfs_connector_add(struct drm_connector *connector);
 void drm_debugfs_connector_remove(struct drm_connector *connector);
+int drm_debugfs_crtc_add(struct drm_crtc *crtc);
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
 #else
 static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 				   struct dentry *root)
@@ -119,4 +122,17 @@ static inline int drm_debugfs_connector_add(struct drm_connector *connector)
 static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
 {
 }
+
+static inline int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+	return 0;
+}
+static inline void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+}
+
+static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+	return 0;
+}
 #endif
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b969a64..48a6167 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -952,8 +952,10 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 	u32 vblank_count;
 	unsigned int seq;
 
-	if (WARN_ON(pipe >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs)) {
+		*vblanktime = (struct timeval) { 0 };
 		return 0;
+	}
 
 	do {
 		seq = read_seqbegin(&vblank->seqlock);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 53f07ac..f64ac86 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -165,6 +165,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
 	unsigned int vfieldrate, hperiod;
 	int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
 	int interlace;
+	u64 tmp;
 
 	/* allocate the drm_display_mode structure. If failure, we will
 	 * return directly
@@ -322,8 +323,11 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
 		drm_mode->vsync_end = drm_mode->vsync_start + vsync;
 	}
 	/* 15/13. Find pixel clock frequency (kHz for xf86) */
-	drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
-	drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+	tmp = drm_mode->htotal; /* perform intermediate calcs in u64 */
+	tmp *= HV_FACTOR * 1000;
+	do_div(tmp, hperiod);
+	drm_mode->clock = tmp;
+	drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
 	/* 18/16. Find actual vertical frame frequency */
 	/* ignore - just set the mode flag for interlaced */
 	if (interlaced) {
@@ -997,6 +1001,7 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
 	    mode1->vsync_end == mode2->vsync_end &&
 	    mode1->vtotal == mode2->vtotal &&
 	    mode1->vscan == mode2->vscan &&
+	    mode1->picture_aspect_ratio == mode2->picture_aspect_ratio &&
 	    (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
 	     (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
 		return true;
@@ -1499,6 +1504,27 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
 	out->vrefresh = in->vrefresh;
 	out->flags = in->flags;
 	out->type = in->type;
+	out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
+
+	switch (in->picture_aspect_ratio) {
+	case HDMI_PICTURE_ASPECT_4_3:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_4_3;
+		break;
+	case HDMI_PICTURE_ASPECT_16_9:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_16_9;
+		break;
+	case HDMI_PICTURE_ASPECT_64_27:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_64_27;
+		break;
+	case HDMI_PICTURE_ASPECT_256_135:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
+		break;
+	case HDMI_PICTURE_ASPECT_RESERVED:
+	default:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
+		break;
+	}
+
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
@@ -1544,6 +1570,27 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 
+	/* Clear the picture aspect ratio bits in out->flags */
+	out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
+
+	switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
+	case DRM_MODE_FLAG_PIC_AR_4_3:
+		out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+		break;
+	case DRM_MODE_FLAG_PIC_AR_16_9:
+		out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+		break;
+	case DRM_MODE_FLAG_PIC_AR_64_27:
+		out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27;
+		break;
+	case DRM_MODE_FLAG_PIC_AR_256_135:
+		out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135;
+		break;
+	default:
+		out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+		break;
+	}
+
 	out->status = drm_mode_validate_basic(out);
 	if (out->status != MODE_OK)
 		goto out;
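The u64 conversion above matters because HV_FACTOR is 1000, so the old 32-bit expression computed htotal * 10^6: any htotal above 2147 overflows a signed int, and a large CVT mode (htotal around 4400) would need roughly 4.4 * 10^9. A worked sketch of the fixed-up math, with an illustrative hperiod value (do_div() divides the u64 in place and returns the remainder):

	u64 tmp = 4400;			/* htotal of a large CVT mode */

	tmp *= 1000 * 1000;		/* HV_FACTOR * 1000: exceeds 32 bits */
	do_div(tmp, 14000);		/* horizontal period, illustrative */
	/* tmp now holds the pixel clock in kHz without truncation */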
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 1d45738..2544dfe 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -70,8 +70,23 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
 void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 				    const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	const struct drm_format_info *info;
 	int i;
 
+	info = drm_format_info(mode_cmd->pixel_format);
+	if (!info || !info->depth) {
+		char *format_name = drm_get_format_name(mode_cmd->pixel_format);
+
+		DRM_DEBUG_KMS("non-RGB pixel format %s\n", format_name);
+		kfree(format_name);
+
+		fb->depth = 0;
+		fb->bits_per_pixel = 0;
+	} else {
+		fb->depth = info->depth;
+		fb->bits_per_pixel = info->cpp[0] * 8;
+	}
+
 	fb->width = mode_cmd->width;
 	fb->height = mode_cmd->height;
 	for (i = 0; i < 4; i++) {
@@ -79,8 +94,6 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 		fb->offsets[i] = mode_cmd->offsets[i];
 		fb->modifier[i] = mode_cmd->modifier[i];
 	}
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
-				    &fb->bits_per_pixel);
 	fb->pixel_format = mode_cmd->pixel_format;
 	fb->flags = mode_cmd->flags;
 }
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index bc98bb9..47848ed 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -6,6 +6,11 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_of.h>
 
+static void drm_release_of(struct device *dev, void *data)
+{
+	of_node_put(data);
+}
+
 /**
  * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
  * @dev: DRM device
@@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
 EXPORT_SYMBOL(drm_of_find_possible_crtcs);
 
 /**
+ * drm_of_component_match_add - Add a component helper OF node match rule
+ * @master: master device
+ * @matchptr: component match pointer
+ * @compare: compare function used for matching component
+ * @node: of_node to match; the helper takes and manages its own reference
+ */
+void drm_of_component_match_add(struct device *master,
+				struct component_match **matchptr,
+				int (*compare)(struct device *, void *),
+				struct device_node *node)
+{
+	of_node_get(node);
+	component_match_add_release(master, matchptr, drm_release_of,
+				    compare, node);
+}
+EXPORT_SYMBOL_GPL(drm_of_component_match_add);
+
+/**
  * drm_of_component_probe - Generic probe function for a component based master
  * @dev: master device containing the OF node
  * @compare_of: compare function used for matching components
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev,
 			continue;
 		}
 
-		component_match_add(dev, &match, compare_of, port);
+		drm_of_component_match_add(dev, &match, compare_of, port);
 		of_node_put(port);
 	}
 
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev,
 				continue;
 			}
 
-			component_match_add(dev, &match, compare_of, remote);
+			drm_of_component_match_add(dev, &match, compare_of,
+						   remote);
 			of_node_put(remote);
 		}
 		of_node_put(port);
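A hypothetical master-driver probe, sketching the intended calling convention: the helper takes its own reference on the node, so callers keep their existing get/put pairing unchanged (compare_of and example_master_ops are placeholders, not part of this series):

	static int example_probe(struct device *dev, struct device_node *np)
	{
		struct component_match *match = NULL;
		struct device_node *child;

		for_each_available_child_of_node(np, child)
			drm_of_component_match_add(dev, &match, compare_of,
						   child);

		return component_master_add_with_match(dev,
						       &example_master_ops,
						       match);
	}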
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index aa68766..0dee6ac 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -16,6 +16,7 @@
 
 #include <linux/component.h>
 #include <linux/of_platform.h>
+#include <drm/drm_of.h>
 
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
@@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
 			if (!core_node)
 				break;
 
-			component_match_add(&pdev->dev, &match, compare_of,
-					    core_node);
+			drm_of_component_match_add(&pdev->dev, &match,
+						   compare_of, core_node);
 			of_node_put(core_node);
 		}
 	} else if (dev->platform_data) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 0370b84..7d066a9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -409,20 +409,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct drm_device *dev = obj->dev;
 	bool write = !!(op & ETNA_PREP_WRITE);
-	int ret;
+	unsigned long remain =
+		op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
+	long lret;
 
-	if (op & ETNA_PREP_NOSYNC) {
-		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
-							  write))
-			return -EBUSY;
-	} else {
-		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
-
-		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
-							  write, true, remain);
-		if (ret <= 0)
-			return ret == 0 ? -ETIMEDOUT : ret;
-	}
+	lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+						   write, true, remain);
+	if (lret < 0)
+		return lret;
+	else if (lret == 0)
+		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 
 	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
 		if (!etnaviv_obj->sgt) {
@@ -470,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
 	const char *type, struct seq_file *m)
 {
-	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		seq_printf(m, "\t%9s: %s %s seq %u\n",
 			   type,
 			   fence->ops->get_driver_name(fence),
@@ -486,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned long off = drm_vma_node_start(&obj->vma_node);
 
 	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
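The etnaviv_gem_cpu_prep() rework above folds ETNA_PREP_NOSYNC into a zero timeout and relies on the return-value contract of reservation_object_wait_timeout_rcu(): negative on error, zero when the fences are still pending, positive once they have signaled. Sketched in isolation (not the driver code):

	long lret = reservation_object_wait_timeout_rcu(resv, write,
							true, remain);
	if (lret < 0)
		return lret;			/* e.g. -ERESTARTSYS */
	if (lret == 0)				/* fences still pending */
		return remain ? -ETIMEDOUT : -EBUSY;
	/* lret > 0: all fences signaled */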
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b1254f8..d221182 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -15,7 +15,7 @@
  */
 
 #include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
 #include "etnaviv_dump.h"
@@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work)
 	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 		if (!gpu->event[i].used)
 			continue;
-		fence_signal(gpu->event[i].fence);
+		dma_fence_signal(gpu->event[i].fence);
 		gpu->event[i].fence = NULL;
 		gpu->event[i].used = false;
 		complete(&gpu->event_free);
@@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
 /* fence object management */
 struct etnaviv_fence {
 	struct etnaviv_gpu *gpu;
-	struct fence base;
+	struct dma_fence base;
 };
 
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct etnaviv_fence, base);
 }
 
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "etnaviv";
 }
 
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	return dev_name(f->gpu->dev);
 }
 
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
 {
 	return true;
 }
 
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	return fence_completed(f->gpu, f->base.seqno);
 }
 
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	kfree_rcu(f, base.rcu);
 }
 
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
 	.get_driver_name = etnaviv_fence_get_driver_name,
 	.get_timeline_name = etnaviv_fence_get_timeline_name,
 	.enable_signaling = etnaviv_fence_enable_signaling,
 	.signaled = etnaviv_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = etnaviv_fence_release,
 };
 
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_fence *f;
 
@@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 
 	f->gpu = gpu;
 
-	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
-		   gpu->fence_context, ++gpu->next_fence);
+	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+		       gpu->fence_context, ++gpu->next_fence);
 
 	return &f->base;
 }
@@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 {
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i, ret;
 
 	if (!exclusive) {
@@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 		/* Wait on any existing exclusive fence which isn't our own */
 		fence = reservation_object_get_excl(robj);
 		if (fence && fence->context != context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 		fence = rcu_dereference_protected(fobj->shared[i],
 						reservation_object_held(robj));
 		if (fence->context != context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work)
 
 	mutex_lock(&gpu->lock);
 	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
-		if (!fence_is_signaled(cmdbuf->fence))
+		if (!dma_fence_is_signaled(cmdbuf->fence))
 			break;
 
 		list_del(&cmdbuf->node);
-		fence_put(cmdbuf->fence);
+		dma_fence_put(cmdbuf->fence);
 
 		for (i = 0; i < cmdbuf->nr_bos; i++) {
 			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned int event, i;
 	int ret;
 
@@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 		}
 
 		while ((event = ffs(intr)) != 0) {
-			struct fence *fence;
+			struct dma_fence *fence;
 
 			event -= 1;
 
@@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
 			fence = gpu->event[event].fence;
 			gpu->event[event].fence = NULL;
-			fence_signal(fence);
+			dma_fence_signal(fence);
 
 			/*
 			 * Events can be processed out of order.  Eg,
@@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 		return ret;
 
 	gpu->drm = drm;
-	gpu->fence_context = fence_context_alloc(1);
+	gpu->fence_context = dma_fence_context_alloc(1);
 	spin_lock_init(&gpu->fence_spinlock);
 
 	INIT_LIST_HEAD(&gpu->active_cmd_list);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278d..8c6b824 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
 
 struct etnaviv_event {
 	bool used;
-	struct fence *fence;
+	struct dma_fence *fence;
 };
 
 struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
 	/* vram node used if the cmdbuf is mapped through the MMUv2 */
 	struct drm_mm_node vram_node;
 	/* fence after which this buffer is to be disposed */
-	struct fence *fence;
+	struct dma_fence *fence;
 	/* target exec state */
 	u32 exec_state;
 	/* per GPU in-flight list */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index def78c8..4a21a74 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -69,7 +69,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 
 	spin_lock(&priv->lock);
 	priv->pending &= ~commit->crtcs;
@@ -254,6 +254,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
 
 	drm_atomic_helper_swap_state(state, true);
 
+	drm_atomic_state_get(state);
 	if (nonblock)
 		schedule_work(&commit->work);
 	else
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 3a44e70..97daf23 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -124,7 +124,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
 				  psbfb->gtt->offset;
 
-	page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	page_num = vma_pages(vma);
 	address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -236,22 +236,20 @@ static int psb_framebuffer_init(struct drm_device *dev,
 					const struct drm_mode_fb_cmd2 *mode_cmd,
 					struct gtt_range *gt)
 {
-	u32 bpp, depth;
+	const struct drm_format_info *info;
 	int ret;
 
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+	/*
+	 * Reject unknown formats, YUV formats, and formats with more than
+	 * 4 bytes per pixel.
+	 */
+	info = drm_format_info(mode_cmd->pixel_format);
+	if (!info || !info->depth || info->cpp[0] > 4)
+		return -EINVAL;
 
 	if (mode_cmd->pitches[0] & 63)
 		return -EINVAL;
-	switch (bpp) {
-	case 8:
-	case 16:
-	case 24:
-	case 32:
-		break;
-	default:
-		return -EINVAL;
-	}
+
 	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
 	fb->gtt = gt;
 	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
@@ -298,7 +296,6 @@ static struct drm_framebuffer *psb_framebuffer_create
  *	psbfb_alloc		-	allocate frame buffer memory
  *	@dev: the DRM device
  *	@aligned_size: space needed
- *	@force: fall back to GEM buffers if need be
  *
  *	Allocate the frame buffer. In the usual case we get a GTT range that
  *	is stolen memory backed and life is simple. If there isn't sufficient
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 8f69225..76aea2e 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -76,6 +76,7 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
  *	psb_gtt_insert	-	put an object into the GTT
  *	@dev: our DRM device
  *	@r: our GTT range
+ *	@resume: true when restoring an existing mapping on resume
  *
  *	Take our preallocated GTT range and insert the GEM object into
  *	the GTT. This is protected via the gtt mutex which the caller
@@ -321,6 +322,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
  *	@len: length (bytes) of address space required
  *	@name: resource name
  *	@backed: resource should be backed by stolen pages
+ *	@align: requested alignment
  *
  *	Ask the kernel core to find us a suitable range of addresses
  *	to use for a GTT mapping.
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 90377a6..e88fde1 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -24,6 +24,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
 
 #include "kirin_drm_drv.h"
 
@@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np)
 		DRM_ERROR("no valid endpoint node\n");
 		return ERR_PTR(-ENODEV);
 	}
-	of_node_put(endpoint);
 
 	remote = of_graph_get_remote_port_parent(endpoint);
+	of_node_put(endpoint);
 	if (!remote) {
 		DRM_ERROR("no valid remote node\n");
 		return ERR_PTR(-ENODEV);
 	}
-	of_node_put(remote);
 
 	if (!of_device_is_available(remote)) {
 		DRM_ERROR("not available for remote node\n");
@@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
 	if (IS_ERR(remote))
 		return PTR_ERR(remote);
 
-	component_match_add(dev, &match, compare_of, remote);
+	drm_of_component_match_add(dev, &match, compare_of, remote);
+	of_node_put(remote);
 
 	return component_master_add_with_match(dev, &kirin_drm_ops, match);
 
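The kirin fix restores the usual OF refcounting order: drop a node reference only after the last use of the pointer, and keep the returned node referenced for the caller. The same shape as a standalone sketch (hypothetical helper name):

	static struct device_node *example_get_remote(struct device_node *np)
	{
		struct device_node *endpoint, *remote;

		endpoint = of_graph_get_next_endpoint(np, NULL);
		if (!endpoint)
			return ERR_PTR(-ENODEV);

		remote = of_graph_get_remote_port_parent(endpoint);
		of_node_put(endpoint);		/* endpoint no longer used */
		if (!remote)
			return ERR_PTR(-ENODEV);

		return remote;			/* caller must of_node_put() */
	}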
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 9798d40..af8683e 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1289,7 +1289,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
 	mutex_unlock(&priv->audio_mutex);
 }
 
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int
+tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
 	struct tda998x_priv *priv = dev_get_drvdata(dev);
 
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 7769e46..df96aed 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -11,6 +11,7 @@
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	select DRM_MIPI_DSI
+	select RELAY
 	# i915 depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
 	select BACKLIGHT_LCD_SUPPORT if ACPI
@@ -24,16 +25,17 @@
 	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
 	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
 	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
-	  If M is selected, the module will be called i915.  AGP support
-	  is required for this driver to work. This driver is used by
-	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
-	  replaces the older i830 module that supported a subset of the
-	  hardware in older X.org releases.
+
+	  This driver is used by the Intel driver in X.org 6.8 and
+	  XFree86 4.4 and above. It replaces the older i830 module that
+	  supported a subset of the hardware in older X.org releases.
 
 	  Note that the older i810/i815 chipsets require the use of the
 	  i810 driver instead, and the Atom z5xx series has an entirely
 	  different implementation.
 
+	  If "M" is selected, the module will be called i915.
+
 config DRM_I915_PRELIMINARY_HW_SUPPORT
 	bool "Enable preliminary support for prerelease Intel hardware by default"
 	depends on DRM_I915
@@ -46,6 +48,31 @@
 
 	  If in doubt, say "N".
 
+config DRM_I915_CAPTURE_ERROR
+	bool "Enable capturing GPU state following a hang"
+	depends on DRM_I915
+	default y
+	help
+	  This option enables capturing the GPU state when a hang is detected.
+	  This information is vital for triaging hangs and assists in debugging.
+	  Please report any hang to
+	  https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+	  for triaging.
+
+	  If in doubt, say "Y".
+
+config DRM_I915_COMPRESS_ERROR
+	bool "Compress GPU error state"
+	depends on DRM_I915_CAPTURE_ERROR
+	select ZLIB_DEFLATE
+	default y
+	help
+	  This option selects ZLIB_DEFLATE if it isn't already
+	  selected and causes any error state captured upon a GPU hang
+	  to be compressed using zlib.
+
+	  If in doubt, say "Y".
+
 config DRM_I915_USERPTR
 	bool "Always enable userptr support"
 	depends on DRM_I915
@@ -60,6 +87,7 @@
 config DRM_I915_GVT
         bool "Enable Intel GVT-g graphics virtualization host support"
         depends on DRM_I915
+        depends on 64BIT
         default n
         help
 	  Choose this option if you want to enable Intel GVT-g graphics
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a998c2b..0857e50 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -35,17 +35,19 @@
 	  i915_gem_execbuffer.o \
 	  i915_gem_fence.o \
 	  i915_gem_gtt.o \
+	  i915_gem_internal.o \
 	  i915_gem.o \
 	  i915_gem_render_state.o \
 	  i915_gem_request.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
+	  i915_gem_timeline.o \
 	  i915_gem_userptr.o \
-	  i915_gpu_error.o \
 	  i915_trace_points.o \
 	  intel_breadcrumbs.o \
 	  intel_engine_cs.o \
+	  intel_hangcheck.o \
 	  intel_lrc.o \
 	  intel_mocs.o \
 	  intel_ringbuffer.o \
@@ -102,11 +104,15 @@
 	  intel_dvo.o \
 	  intel_hdmi.o \
 	  intel_i2c.o \
+	  intel_lspcon.o \
 	  intel_lvds.o \
 	  intel_panel.o \
 	  intel_sdvo.o \
 	  intel_tv.o
 
+# Post-mortem debug and GPU hang state capture
+i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+
 # virtual gpu code
 i915-y += i915_vgpu.o
 
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index d0f21a6..34ea477 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,5 +1,7 @@
 GVT_DIR := gvt
-GVT_SOURCE := gvt.o
+GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
+	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
+	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
 
 ccflags-y                      += -I$(src) -I$(src)/$(GVT_DIR) -Wall
 i915-y			       += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
new file mode 100644
index 0000000..0d41ebc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Dexuan Cui
+ *
+ * Contributors:
+ *    Pei Zhang <pei.zhang@intel.com>
+ *    Min He <min.he@intel.com>
+ *    Niu Bing <bing.niu@intel.com>
+ *    Yulei Zhang <yulei.zhang@intel.com>
+ *    Zhenyu Wang <zhenyuw@linux.intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	u32 alloc_flag, search_flag;
+	u64 start, end, size;
+	struct drm_mm_node *node;
+	int retried = 0;
+	int ret;
+
+	if (high_gm) {
+		search_flag = DRM_MM_SEARCH_BELOW;
+		alloc_flag = DRM_MM_CREATE_TOP;
+		node = &vgpu->gm.high_gm_node;
+		size = vgpu_hidden_sz(vgpu);
+		start = gvt_hidden_gmadr_base(gvt);
+		end = gvt_hidden_gmadr_end(gvt);
+	} else {
+		search_flag = DRM_MM_SEARCH_DEFAULT;
+		alloc_flag = DRM_MM_CREATE_DEFAULT;
+		node = &vgpu->gm.low_gm_node;
+		size = vgpu_aperture_sz(vgpu);
+		start = gvt_aperture_gmadr_base(gvt);
+		end = gvt_aperture_gmadr_end(gvt);
+	}
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+search_again:
+	ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+						  node, size, 4096, 0,
+						  start, end, search_flag,
+						  alloc_flag);
+	if (ret) {
+		ret = i915_gem_evict_something(&dev_priv->ggtt.base,
+					       size, 4096, 0, start, end, 0);
+		if (ret == 0 && ++retried < 3)
+			goto search_again;
+
+		gvt_err("fail to alloc %s gm space from host, retried %d\n",
+				high_gm ? "high" : "low", retried);
+	}
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+	return ret;
+}
+
+static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ret;
+
+	ret = alloc_gm(vgpu, false);
+	if (ret)
+		return ret;
+
+	ret = alloc_gm(vgpu, true);
+	if (ret)
+		goto out_free_aperture;
+
+	gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
+		     vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
+
+	gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
+		     vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
+
+	return 0;
+out_free_aperture:
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	drm_mm_remove_node(&vgpu->gm.low_gm_node);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+	return ret;
+}
+
+static void free_vgpu_gm(struct intel_vgpu *vgpu)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	drm_mm_remove_node(&vgpu->gm.low_gm_node);
+	drm_mm_remove_node(&vgpu->gm.high_gm_node);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+/**
+ * intel_vgpu_write_fence - write fence registers owned by a vGPU
+ * @vgpu: vGPU instance
+ * @fence: vGPU fence register number
+ * @value: Fence register value to be written
+ *
+ * This function is used to write fence registers owned by a vGPU. The vGPU
+ * fence register number will be translated into HW fence register number.
+ *
+ */
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+		u32 fence, u64 value)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_fence_reg *reg;
+	i915_reg_t fence_reg_lo, fence_reg_hi;
+
+	assert_rpm_wakelock_held(dev_priv);
+
+	if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+		return;
+
+	reg = vgpu->fence.regs[fence];
+	if (WARN_ON(!reg))
+		return;
+
+	fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
+	fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
+
+	I915_WRITE(fence_reg_lo, 0);
+	POSTING_READ(fence_reg_lo);
+
+	I915_WRITE(fence_reg_hi, upper_32_bits(value));
+	I915_WRITE(fence_reg_lo, lower_32_bits(value));
+	POSTING_READ(fence_reg_lo);
+}
+
+static void free_vgpu_fence(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_fence_reg *reg;
+	u32 i;
+
+	if (WARN_ON(!vgpu_fence_sz(vgpu)))
+		return;
+
+	intel_runtime_pm_get(dev_priv);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+		reg = vgpu->fence.regs[i];
+		intel_vgpu_write_fence(vgpu, i, 0);
+		list_add_tail(&reg->link,
+			      &dev_priv->mm.fence_list);
+	}
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_fence_reg *reg;
+	int i;
+	struct list_head *pos, *q;
+
+	intel_runtime_pm_get(dev_priv);
+
+	/* Request fences from host */
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i = 0;
+	list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
+		reg = list_entry(pos, struct drm_i915_fence_reg, link);
+		if (reg->pin_count || reg->vma)
+			continue;
+		list_del(pos);
+		vgpu->fence.regs[i] = reg;
+		intel_vgpu_write_fence(vgpu, i, 0);
+		if (++i == vgpu_fence_sz(vgpu))
+			break;
+	}
+	if (i != vgpu_fence_sz(vgpu))
+		goto out_free_fence;
+
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_runtime_pm_put(dev_priv);
+	return 0;
+out_free_fence:
+	/* Return fences to host, if fail */
+	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+		reg = vgpu->fence.regs[i];
+		if (!reg)
+			continue;
+		list_add_tail(&reg->link,
+			      &dev_priv->mm.fence_list);
+	}
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_runtime_pm_put(dev_priv);
+	return -ENOSPC;
+}
+
+static void free_resource(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+
+	gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
+	gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
+	gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
+}
+
+static int alloc_resource(struct intel_vgpu *vgpu,
+		struct intel_vgpu_creation_params *param)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	unsigned long request, avail, max, taken;
+	const char *item;
+
+	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+		gvt_err("Invalid vGPU creation params\n");
+		return -EINVAL;
+	}
+
+	item = "low GM space";
+	max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+	taken = gvt->gm.vgpu_allocated_low_gm_size;
+	avail = max - taken;
+	request = MB_TO_BYTES(param->low_gm_sz);
+
+	if (request > avail)
+		goto no_enough_resource;
+
+	vgpu_aperture_sz(vgpu) = request;
+
+	item = "high GM space";
+	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+	taken = gvt->gm.vgpu_allocated_high_gm_size;
+	avail = max - taken;
+	request = MB_TO_BYTES(param->high_gm_sz);
+
+	if (request > avail)
+		goto no_enough_resource;
+
+	vgpu_hidden_sz(vgpu) = request;
+
+	item = "fence";
+	max = gvt_fence_sz(gvt) - HOST_FENCE;
+	taken = gvt->fence.vgpu_allocated_fence_num;
+	avail = max - taken;
+	request = param->fence_sz;
+
+	if (request > avail)
+		goto no_enough_resource;
+
+	vgpu_fence_sz(vgpu) = request;
+
+	gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
+	gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
+	gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+	return 0;
+
+no_enough_resource:
+	gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
+	gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
+		vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+		BYTES_TO_MB(max), BYTES_TO_MB(taken));
+	return -ENOSPC;
+}
+
+/**
+ * intel_vgpu_free_resource - free HW resource owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to free the HW resource owned by a vGPU.
+ *
+ */
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
+{
+	free_vgpu_gm(vgpu);
+	free_vgpu_fence(vgpu);
+	free_resource(vgpu);
+}
+
+/**
+ * intel_vgpu_alloc_resource - allocate HW resource for a vGPU
+ * @vgpu: vGPU
+ * @param: vGPU creation params
+ *
+ * This function is used to allocate HW resource for a vGPU. User specifies
+ * the resource configuration through the creation params.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+		struct intel_vgpu_creation_params *param)
+{
+	int ret;
+
+	ret = alloc_resource(vgpu, param);
+	if (ret)
+		return ret;
+
+	ret = alloc_vgpu_gm(vgpu);
+	if (ret)
+		goto out_free_resource;
+
+	ret = alloc_vgpu_fence(vgpu);
+	if (ret)
+		goto out_free_vgpu_gm;
+
+	return 0;
+
+out_free_vgpu_gm:
+	free_vgpu_gm(vgpu);
+out_free_resource:
+	free_resource(vgpu);
+	return ret;
+}
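A hypothetical caller sketch for the allocator above; the field names follow struct intel_vgpu_creation_params as used in alloc_resource(), and the sizes are illustrative only:

	static int example_size_vgpu(struct intel_vgpu *vgpu)
	{
		struct intel_vgpu_creation_params param = {
			.low_gm_sz  = 64,	/* MB of mappable (low) GM */
			.high_gm_sz = 448,	/* MB of hidden (high) GM */
			.fence_sz   = 4,	/* fence registers */
		};

		/* -ENOSPC when a host pool cannot satisfy the request */
		return intel_vgpu_alloc_resource(vgpu, &param);
	}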
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
new file mode 100644
index 0000000..4c68774
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eddie Dong <eddie.dong@intel.com>
+ *    Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+enum {
+	INTEL_GVT_PCI_BAR_GTTMMIO = 0,
+	INTEL_GVT_PCI_BAR_APERTURE,
+	INTEL_GVT_PCI_BAR_PIO,
+	INTEL_GVT_PCI_BAR_MAX,
+};
+
+/**
+ * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
+ * @__vgpu: target vGPU
+ * @offset: offset into the configuration space
+ * @p_data: buffer to return the read data in
+ * @bytes: number of bytes to read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	struct intel_vgpu *vgpu = __vgpu;
+
+	if (WARN_ON(bytes > 4))
+		return -EINVAL;
+
+	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+		return -EINVAL;
+
+	memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
+	return 0;
+}
+
+static int map_aperture(struct intel_vgpu *vgpu, bool map)
+{
+	u64 first_gfn, first_mfn;
+	u64 val;
+	int ret;
+
+	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
+		return 0;
+
+	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
+	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+	else
+		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+
+	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
+	first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
+						  first_mfn,
+						  vgpu_aperture_sz(vgpu)
+						  >> PAGE_SHIFT, map,
+						  GVT_MAP_APERTURE);
+	if (ret)
+		return ret;
+
+	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
+	return 0;
+}
+
+static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
+{
+	u64 start, end;
+	u64 val;
+	int ret;
+
+	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
+		return 0;
+
+	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
+	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+	else
+		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+
+	start &= ~GENMASK(3, 0);
+	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
+
+	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
+	if (ret)
+		return ret;
+
+	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
+	return 0;
+}
+
+static int emulate_pci_command_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u8 old = vgpu_cfg_space(vgpu)[offset];
+	u8 new = *(u8 *)p_data;
+	u8 changed = old ^ new;
+	int ret;
+
+	if (!(changed & PCI_COMMAND_MEMORY))
+		return 0;
+
+	if (old & PCI_COMMAND_MEMORY) {
+		ret = trap_gttmmio(vgpu, false);
+		if (ret)
+			return ret;
+		ret = map_aperture(vgpu, false);
+		if (ret)
+			return ret;
+	} else {
+		ret = trap_gttmmio(vgpu, true);
+		if (ret)
+			return ret;
+		ret = map_aperture(vgpu, true);
+		if (ret)
+			return ret;
+	}
+
+	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+	return 0;
+}
+
+static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	unsigned int bar_index =
+		(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
+	u32 new = *(u32 *)(p_data);
+	bool lo = IS_ALIGNED(offset, 8);
+	u64 size;
+	int ret = 0;
+	bool mmio_enabled =
+		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
+
+	if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
+		return -EINVAL;
+
+	if (new == 0xffffffff) {
+		/*
+		 * Power-up software can determine how much address
+		 * space the device requires by writing a value of
+		 * all 1's to the register and then reading the value
+		 * back. The device will return 0's in all don't-care
+		 * address bits.
+		 */
+		size = vgpu->cfg_space.bar[bar_index].size;
+		if (lo) {
+			new = rounddown(new, size);
+		} else {
+			u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
+			/* A 32-bit BAR returns all zeroes in the upper
+			 * dword; a 64-bit BAR computes the size from the
+			 * lower dword and returns the matching upper
+			 * address-mask bits.
+			 */
+			if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+				new &= (~(size-1)) >> 32;
+			else
+				new = 0;
+		}
+		/*
+		 * Unmap & untrap the BAR, since the guest hasn't
+		 * configured a valid GPA
+		 */
+		switch (bar_index) {
+		case INTEL_GVT_PCI_BAR_GTTMMIO:
+			ret = trap_gttmmio(vgpu, false);
+			break;
+		case INTEL_GVT_PCI_BAR_APERTURE:
+			ret = map_aperture(vgpu, false);
+			break;
+		}
+		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+	} else {
+		/*
+		 * Unmap & untrap the old BAR first, since the guest has
+		 * re-configured the BAR
+		 */
+		switch (bar_index) {
+		case INTEL_GVT_PCI_BAR_GTTMMIO:
+			ret = trap_gttmmio(vgpu, false);
+			break;
+		case INTEL_GVT_PCI_BAR_APERTURE:
+			ret = map_aperture(vgpu, false);
+			break;
+		}
+		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+		/* Track the new BAR */
+		if (mmio_enabled) {
+			switch (bar_index) {
+			case INTEL_GVT_PCI_BAR_GTTMMIO:
+				ret = trap_gttmmio(vgpu, true);
+				break;
+			case INTEL_GVT_PCI_BAR_APERTURE:
+				ret = map_aperture(vgpu, true);
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
+/**
+ * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
+ * @__vgpu: target vGPU
+ * @offset: offset into the configuration space
+ * @p_data: data to be written
+ * @bytes: number of bytes to write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	struct intel_vgpu *vgpu = __vgpu;
+	int ret;
+
+	if (WARN_ON(bytes > 4))
+		return -EINVAL;
+
+	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+		return -EINVAL;
+
+	/* First check if it's PCI_COMMAND */
+	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
+		if (WARN_ON(bytes > 2))
+			return -EINVAL;
+		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
+	}
+
+	switch (rounddown(offset, 4)) {
+	case PCI_BASE_ADDRESS_0:
+	case PCI_BASE_ADDRESS_1:
+	case PCI_BASE_ADDRESS_2:
+	case PCI_BASE_ADDRESS_3:
+		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+			return -EINVAL;
+		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+
+	case INTEL_GVT_PCI_SWSCI:
+		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+			return -EINVAL;
+		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
+		if (ret)
+			return ret;
+		break;
+
+	case INTEL_GVT_PCI_OPREGION:
+		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+			return -EINVAL;
+		ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+		if (ret)
+			return ret;
+
+		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+		break;
+	default:
+		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+		break;
+	}
+	return 0;
+}
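For reference, the sizing handshake emulate_pci_bar_write() answers is the standard PCI one: software writes all 1s to the BAR, reads the value back, and takes the two's complement of the returned address mask. A sketch for a 32-bit memory BAR (helper name hypothetical):

	static u32 example_bar_size(u32 readback)
	{
		u32 mask = readback & PCI_BASE_ADDRESS_MEM_MASK;	/* ~0xf */

		return mask ? ~mask + 1 : 0;	/* e.g. 0xfff00000 -> 1 MB */
	}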
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
new file mode 100644
index 0000000..1238b75
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -0,0 +1,2830 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Ke Yu
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Yulei Zhang <yulei.zhang@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+#define INVALID_OP    (~0U)
+
+#define OP_LEN_MI           9
+#define OP_LEN_2D           10
+#define OP_LEN_3D_MEDIA     16
+#define OP_LEN_MFX_VC       16
+#define OP_LEN_VEBOX	    16
+
+#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)
+
+struct sub_op_bits {
+	int hi;
+	int low;
+};
+struct decode_info {
+	char *name;
+	int op_len;
+	int nr_sub_op;
+	struct sub_op_bits *sub_op;
+};
+
+#define   MAX_CMD_BUDGET			0x7fffffff
+#define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
+#define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
+#define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)
+
+#define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
+#define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
+#define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)
+
+/* Render Command Map */
+
+/* MI_* command Opcode (28:23) */
+#define OP_MI_NOOP                          0x0
+#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
+#define OP_MI_USER_INTERRUPT                0x2
+#define OP_MI_WAIT_FOR_EVENT                0x3
+#define OP_MI_FLUSH                         0x4
+#define OP_MI_ARB_CHECK                     0x5
+#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
+#define OP_MI_REPORT_HEAD                   0x7
+#define OP_MI_ARB_ON_OFF                    0x8
+#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
+#define OP_MI_BATCH_BUFFER_END              0xA
+#define OP_MI_SUSPEND_FLUSH                 0xB
+#define OP_MI_PREDICATE                     0xC  /* IVB+ */
+#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
+#define OP_MI_SET_APPID                     0xE  /* IVB+ */
+#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
+#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
+#define OP_MI_DISPLAY_FLIP                  0x14
+#define OP_MI_SEMAPHORE_MBOX                0x16
+#define OP_MI_SET_CONTEXT                   0x18
+#define OP_MI_MATH                          0x1A
+#define OP_MI_URB_CLEAR                     0x19
+#define OP_MI_SEMAPHORE_SIGNAL		    0x1B  /* BDW+ */
+#define OP_MI_SEMAPHORE_WAIT		    0x1C  /* BDW+ */
+
+#define OP_MI_STORE_DATA_IMM                0x20
+#define OP_MI_STORE_DATA_INDEX              0x21
+#define OP_MI_LOAD_REGISTER_IMM             0x22
+#define OP_MI_UPDATE_GTT                    0x23
+#define OP_MI_STORE_REGISTER_MEM            0x24
+#define OP_MI_FLUSH_DW                      0x26
+#define OP_MI_CLFLUSH                       0x27
+#define OP_MI_REPORT_PERF_COUNT             0x28
+#define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
+#define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
+#define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
+#define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
+#define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
+#define OP_MI_2E			    0x2E  /* BDW+ */
+#define OP_MI_2F			    0x2F  /* BDW+ */
+#define OP_MI_BATCH_BUFFER_START            0x31
+
+/* Bit definition for dword 0 */
+#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)
+
+#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
+#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
+#define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)
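+/*
+ * In MI_BATCH_BUFFER_START DWord 0, bit 8 selects the address space
+ * (set means PPGTT) and bit 22 marks a second-level batch buffer.
+ */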
+
+/* 2D command: Opcode (28:22) */
+#define OP_2D(x)    ((2<<7) | x)
+
+#define OP_XY_SETUP_BLT                             OP_2D(0x1)
+#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
+#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
+#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
+#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
+#define OP_XY_TEXT_BLT                              OP_2D(0x26)
+#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
+#define OP_XY_COLOR_BLT                             OP_2D(0x50)
+#define OP_XY_PAT_BLT                               OP_2D(0x51)
+#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
+#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
+#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
+#define OP_XY_FULL_BLT                              OP_2D(0x55)
+#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
+#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
+#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
+#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
+#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
+#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
+#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
+#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
+#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
+#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
+#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)
+
+/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
+#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
+	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
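+/*
+ * The packed value equals the top 16 bits of the command DWord: command
+ * type 3 in bits 15:13, then the three fields above. This is the value
+ * get_opcode() extracts for OP_LEN_3D_MEDIA commands.
+ */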
+
+#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)
+
+#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
+#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
+#define OP_3D_MEDIA_0_1_4			OP_3D_MEDIA(0x0, 0x1, 0x04)
+
+#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)
+
+#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)
+
+#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
+#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
+#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
+#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
+#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
+
+#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
+#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
+#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
+#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)
+
+#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
+#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
+#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
+#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
+#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
+#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
+#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
+#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
+#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
+#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
+#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
+#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
+#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
+#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
+#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
+#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
+#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
+#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
+#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
+#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
+#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
+#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
+#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
+#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
+#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
+#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
+#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
+#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
+#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
+#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
+#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
+#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
+#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
+#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
+#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
+
+#define OP_3DSTATE_VF_INSTANCING 		OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
+#define OP_3DSTATE_VF_SGVS  			OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
+#define OP_3DSTATE_VF_TOPOLOGY   		OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
+#define OP_3DSTATE_WM_CHROMAKEY   		OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
+#define OP_3DSTATE_PS_BLEND   			OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
+#define OP_3DSTATE_WM_DEPTH_STENCIL   		OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
+#define OP_3DSTATE_PS_EXTRA   			OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
+#define OP_3DSTATE_RASTER   			OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
+#define OP_3DSTATE_SBE_SWIZ   			OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
+#define OP_3DSTATE_WM_HZ_OP   			OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
+#define OP_3DSTATE_COMPONENT_PACKING		OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
+
+#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
+#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
+#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
+#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
+#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
+#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
+#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
+#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
+#define OP_3DSTATE_MULTISAMPLE_BDW		OP_3D_MEDIA(0x3, 0x0, 0x0D)
+#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
+#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
+#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
+#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
+#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
+#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
+#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
+#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
+#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
+#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
+#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)
+
+/* VCCP Command Parser */
+
+/*
+ * The MFX and VBE cmd definitions below are from the vaapi intel driver
+ * project (BSD License):
+ * git://anongit.freedesktop.org/vaapi/intel-driver
+ * src/i965_defines.h
+ */
+
+#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
+	(3 << 13 | \
+	 (pipeline) << 11 | \
+	 (op) << 8 | \
+	 (sub_opa) << 5 | \
+	 (sub_opb))
+
+#define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
+#define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
+#define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
+#define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
+#define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
+#define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
+#define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
+#define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
+#define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
+#define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
+#define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */
+
+#define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */
+
+#define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
+#define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
+#define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
+#define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
+#define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
+#define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
+#define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
+#define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
+#define OP_MFD_AVC_DPB_STATE			   OP_MFX(2, 1, 1, 6) /* IVB+ */
+#define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
+#define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
+#define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */
+
+#define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
+#define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
+#define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
+#define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
+#define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */
+
+#define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
+#define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
+#define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
+#define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
+#define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */
+
+#define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
+#define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
+#define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */
+
+#define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
+#define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
+#define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)
+
+#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
+	(3 << 13 | \
+	 (pipeline) << 11 | \
+	 (op) << 8 | \
+	 (sub_opa) << 5 | \
+	 (sub_opb))
+
+#define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
+#define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
+#define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)
+
+struct parser_exec_state;
+
+typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
+
+#define GVT_CMD_HASH_BITS   7
+
+/* which DWords need address fix */
+#define ADDR_FIX_1(x1)			(1 << (x1))
+#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
+#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
+#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
+#define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
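+/* e.g. ADDR_FIX_2(4, 7) marks DWords 4 and 7 of a command as addresses */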
+
+struct cmd_info {
+	char *name;
+	u32 opcode;
+
+#define F_LEN_MASK	(1U<<0)
+#define F_LEN_CONST  1U
+#define F_LEN_VAR    0U
+
+/*
+ * command has its own ip advance logic,
+ * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
+ */
+#define F_IP_ADVANCE_CUSTOM (1<<1)
+
+#define F_POST_HANDLE	(1<<2)
+	u32 flag;
+
+#define R_RCS	(1 << RCS)
+#define R_VCS1  (1 << VCS)
+#define R_VCS2  (1 << VCS2)
+#define R_VCS	(R_VCS1 | R_VCS2)
+#define R_BCS	(1 << BCS)
+#define R_VECS	(1 << VECS)
+#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
+	/* rings that support this cmd: RCS/BCS/VCS/VECS */
+	uint16_t rings;
+
+	/* devices that support this cmd: SNB/IVB/HSW/... */
+	uint16_t devices;
+
+	/* which DWords are addresses that need fixing up.
+	 * bit == 0 means a 32-bit non-address operand in the command
+	 * bit == 1 means an address operand, which could be 32-bit
+	 * or 64-bit depending on the architecture (defined by
+	 * "gmadr_bytes_in_cmd" in intel_gvt).
+	 * Whatever the address length, each address takes only
+	 * one bit in the bitmap.
+	 */
+	uint16_t addr_bitmap;
+
+	/* flag == F_LEN_CONST : command length
+	 * flag == F_LEN_VAR : number of length bias bits
+	 * Note: length is in DWords
+	 */
+	uint8_t	len;
+
+	parser_cmd_handler handler;
+};
+
+struct cmd_entry {
+	struct hlist_node hlist;
+	struct cmd_info *info;
+};
+
+enum {
+	RING_BUFFER_INSTRUCTION,
+	BATCH_BUFFER_INSTRUCTION,
+	BATCH_BUFFER_2ND_LEVEL,
+};
+
+enum {
+	GTT_BUFFER,
+	PPGTT_BUFFER
+};
+
+struct parser_exec_state {
+	struct intel_vgpu *vgpu;
+	int ring_id;
+
+	int buf_type;
+
+	/* batch buffer address type */
+	int buf_addr_type;
+
+	/* graphics memory address of ring buffer start */
+	unsigned long ring_start;
+	unsigned long ring_size;
+	unsigned long ring_head;
+	unsigned long ring_tail;
+
+	/* instruction graphics memory address */
+	unsigned long ip_gma;
+
+	/* mapped va of the instr_gma */
+	void *ip_va;
+	void *rb_va;
+
+	void *ret_bb_va;
+	/* next instruction when returning from batch buffer to ring buffer */
+	unsigned long ret_ip_gma_ring;
+
+	/* next instruction when returning from 2nd level batch buffer to batch buffer */
+	unsigned long ret_ip_gma_bb;
+
+	/* batch buffer address type (GTT or PPGTT),
+	 * restored when returning from a 2nd level batch buffer
+	 */
+	int saved_buf_addr_type;
+
+	struct cmd_info *info;
+
+	struct intel_vgpu_workload *workload;
+};
+
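+/* number of DWords a graphics memory address occupies in a command */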
+#define gmadr_dw_number(s)	\
+	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
+
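+/* scan knobs: batch buffer scanning is currently bypassed by default */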
+static unsigned long bypass_scan_mask = 0;
+static bool bypass_batch_buffer_scan = true;
+
+/* ring ALL, type = 0 */
+static struct sub_op_bits sub_op_mi[] = {
+	{31, 29},
+	{28, 23},
+};
+
+static struct decode_info decode_info_mi = {
+	"MI",
+	OP_LEN_MI,
+	ARRAY_SIZE(sub_op_mi),
+	sub_op_mi,
+};
+
+/* ring RCS, command type 2 */
+static struct sub_op_bits sub_op_2d[] = {
+	{31, 29},
+	{28, 22},
+};
+
+static struct decode_info decode_info_2d = {
+	"2D",
+	OP_LEN_2D,
+	ARRAY_SIZE(sub_op_2d),
+	sub_op_2d,
+};
+
+/* ring RCS, command type 3 */
+static struct sub_op_bits sub_op_3d_media[] = {
+	{31, 29},
+	{28, 27},
+	{26, 24},
+	{23, 16},
+};
+
+static struct decode_info decode_info_3d_media = {
+	"3D_Media",
+	OP_LEN_3D_MEDIA,
+	ARRAY_SIZE(sub_op_3d_media),
+	sub_op_3d_media,
+};
+
+/* ring VCS, command type 3 */
+static struct sub_op_bits sub_op_mfx_vc[] = {
+	{31, 29},
+	{28, 27},
+	{26, 24},
+	{23, 21},
+	{20, 16},
+};
+
+static struct decode_info decode_info_mfx_vc = {
+	"MFX_VC",
+	OP_LEN_MFX_VC,
+	ARRAY_SIZE(sub_op_mfx_vc),
+	sub_op_mfx_vc,
+};
+
+/* ring VECS, command type 3 */
+static struct sub_op_bits sub_op_vebox[] = {
+	{31, 29},
+	{28, 27},
+	{26, 24},
+	{23, 21},
+	{20, 16},
+};
+
+static struct decode_info decode_info_vebox = {
+	"VEBOX",
+	OP_LEN_VEBOX,
+	ARRAY_SIZE(sub_op_vebox),
+	sub_op_vebox,
+};
+
+static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+	[RCS] = {
+		&decode_info_mi,
+		NULL,
+		NULL,
+		&decode_info_3d_media,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+	},
+
+	[VCS] = {
+		&decode_info_mi,
+		NULL,
+		NULL,
+		&decode_info_mfx_vc,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+	},
+
+	[BCS] = {
+		&decode_info_mi,
+		NULL,
+		&decode_info_2d,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+	},
+
+	[VECS] = {
+		&decode_info_mi,
+		NULL,
+		NULL,
+		&decode_info_vebox,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+	},
+
+	[VCS2] = {
+		&decode_info_mi,
+		NULL,
+		NULL,
+		&decode_info_mfx_vc,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+	},
+};
+
+static inline u32 get_opcode(u32 cmd, int ring_id)
+{
+	struct decode_info *d_info;
+
+	if (ring_id >= I915_NUM_ENGINES)
+		return INVALID_OP;
+
+	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+	if (d_info == NULL)
+		return INVALID_OP;
+
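+	/* the opcode occupies the most significant op_len bits of the DWord */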
+	return cmd >> (32 - d_info->op_len);
+}
+
+static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+		unsigned int opcode, int ring_id)
+{
+	struct cmd_entry *e;
+
+	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
+		if ((opcode == e->info->opcode) &&
+				(e->info->rings & (1 << ring_id)))
+			return e->info;
+	}
+	return NULL;
+}
+
+static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+		u32 cmd, int ring_id)
+{
+	u32 opcode;
+
+	opcode = get_opcode(cmd, ring_id);
+	if (opcode == INVALID_OP)
+		return NULL;
+
+	return find_cmd_entry(gvt, opcode, ring_id);
+}
+
+static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
+{
+	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
+}
+
+static inline void print_opcode(u32 cmd, int ring_id)
+{
+	struct decode_info *d_info;
+	int i;
+
+	if (ring_id >= I915_NUM_ENGINES)
+		return;
+
+	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+	if (d_info == NULL)
+		return;
+
+	gvt_err("opcode=0x%x %s sub_ops:",
+			cmd >> (32 - d_info->op_len), d_info->name);
+
+	for (i = 0; i < d_info->nr_sub_op; i++)
+		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
+					d_info->sub_op[i].low));
+
+	pr_err("\n");
+}
+
+static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
+{
+	return s->ip_va + (index << 2);
+}
+
+static inline u32 cmd_val(struct parser_exec_state *s, int index)
+{
+	return *cmd_ptr(s, index);
+}
+
+static void parser_exec_state_dump(struct parser_exec_state *s)
+{
+	int cnt = 0;
+	int i;
+
+	gvt_err("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
+			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
+			s->ring_head, s->ring_tail);
+
+	gvt_err("  %s %s ip_gma(%08lx) ",
+			s->buf_type == RING_BUFFER_INSTRUCTION ?
+			"RING_BUFFER" : "BATCH_BUFFER",
+			s->buf_addr_type == GTT_BUFFER ?
+			"GTT" : "PPGTT", s->ip_gma);
+
+	if (s->ip_va == NULL) {
+		gvt_err(" ip_va(NULL)");
+		return;
+	}
+
+	gvt_err("  ip_va=%p: %08x %08x %08x %08x\n",
+			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+			cmd_val(s, 2), cmd_val(s, 3));
+
+	print_opcode(cmd_val(s, 0), s->ring_id);
+
+	/* print the whole page to trace */
+	pr_err("    ip_va=%p: %08x %08x %08x %08x\n",
+			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+			cmd_val(s, 2), cmd_val(s, 3));
+
+	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
+
+	while (cnt < 1024) {
+		pr_err("ip_va=%p: ", s->ip_va);
+		for (i = 0; i < 8; i++)
+			pr_err("%08x ", cmd_val(s, i));
+		pr_err("\n");
+
+		s->ip_va += 8 * sizeof(u32);
+		cnt += 8;
+	}
+}
+
+static inline void update_ip_va(struct parser_exec_state *s)
+{
+	unsigned long len = 0;
+
+	if (WARN_ON(s->ring_head == s->ring_tail))
+		return;
+
+	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+		unsigned long ring_top = s->ring_start + s->ring_size;
+
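+		/* head > tail: the valid range wraps from head to the ring
+		 * top, then continues from ring start to tail
+		 */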
+		if (s->ring_head > s->ring_tail) {
+			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
+				len = (s->ip_gma - s->ring_head);
+			else if (s->ip_gma >= s->ring_start &&
+					s->ip_gma <= s->ring_tail)
+				len = (ring_top - s->ring_head) +
+					(s->ip_gma - s->ring_start);
+		} else
+			len = (s->ip_gma - s->ring_head);
+
+		s->ip_va = s->rb_va + len;
+	} else { /* shadow batch buffer */
+		s->ip_va = s->ret_bb_va;
+	}
+}
+
+static inline int ip_gma_set(struct parser_exec_state *s,
+		unsigned long ip_gma)
+{
+	WARN_ON(!IS_ALIGNED(ip_gma, 4));
+
+	s->ip_gma = ip_gma;
+	update_ip_va(s);
+	return 0;
+}
+
+static inline int ip_gma_advance(struct parser_exec_state *s,
+		unsigned int dw_len)
+{
+	s->ip_gma += (dw_len << 2);
+
+	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+		if (s->ip_gma >= s->ring_start + s->ring_size)
+			s->ip_gma -= s->ring_size;
+		update_ip_va(s);
+	} else {
+		s->ip_va += (dw_len << 2);
+	}
+
+	return 0;
+}
+
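+/*
+ * For F_LEN_VAR commands, the low info->len bits of DWord 0 hold the command
+ * length in DWords minus a bias of 2; F_LEN_CONST commands store it directly.
+ */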
+static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+{
+	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
+		return info->len;
+
+	return (cmd & ((1U << info->len) - 1)) + 2;
+}
+
+static inline int cmd_length(struct parser_exec_state *s)
+{
+	return get_cmd_length(s->info, cmd_val(s, 0));
+}
+
+/* do not remove this, some platforms may need a clflush here */
+#define patch_value(s, addr, val) do { \
+	*addr = val; \
+} while (0)
+
+static bool is_shadowed_mmio(unsigned int offset)
+{
+	bool ret = false;
+
+	if ((offset == 0x2168) || /* BB current head register UDW */
+	    (offset == 0x2140) || /* BB current header register */
+	    (offset == 0x211c) || /* second BB header register UDW */
+	    (offset == 0x2114)) { /* second BB header register */
+		ret = true;
+	}
+	return ret;
+}
+
+static int cmd_reg_handler(struct parser_exec_state *s,
+	unsigned int offset, unsigned int index, char *cmd)
+{
+	struct intel_vgpu *vgpu = s->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+
+	if (offset + 4 > gvt->device_info.mmio_size) {
+		gvt_err("%s access to (%x) outside of MMIO range\n",
+				cmd, offset);
+		return -EINVAL;
+	}
+
+	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+		gvt_err("vgpu%d: %s access to non-render register (%x)\n",
+				s->vgpu->id, cmd, offset);
+		return 0;
+	}
+
+	if (is_shadowed_mmio(offset)) {
+		gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
+				s->vgpu->id, offset);
+		return 0;
+	}
+
+	if (offset == i915_mmio_reg_offset(DERRMR) ||
+		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
+		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
+		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
+	}
+
+	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
+	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
+	return 0;
+}
+
+#define cmd_reg(s, i) \
+	(cmd_val(s, i) & GENMASK(22, 2))
+
+#define cmd_reg_inhibit(s, i) \
+	(cmd_val(s, i) & GENMASK(22, 18))
+
+#define cmd_gma(s, i) \
+	(cmd_val(s, i) & GENMASK(31, 2))
+
+#define cmd_gma_hi(s, i) \
+	(cmd_val(s, i) & GENMASK(15, 0))
+
+static int cmd_handler_lri(struct parser_exec_state *s)
+{
+	int i, ret = 0;
+	int cmd_len = cmd_length(s);
+	struct intel_gvt *gvt = s->vgpu->gvt;
+
+	for (i = 1; i < cmd_len; i += 2) {
+		if (IS_BROADWELL(gvt->dev_priv) &&
+				(s->ring_id != RCS)) {
+			if (s->ring_id == BCS &&
+					cmd_reg(s, i) ==
+					i915_mmio_reg_offset(DERRMR))
+				ret |= 0;
+			else
+				ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+		}
+		if (ret)
+			break;
+		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+	}
+	return ret;
+}
+
+static int cmd_handler_lrr(struct parser_exec_state *s)
+{
+	int i, ret = 0;
+	int cmd_len = cmd_length(s);
+
+	for (i = 1; i < cmd_len; i += 2) {
+		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+			ret |= ((cmd_reg_inhibit(s, i) ||
+					(cmd_reg_inhibit(s, i + 1)))) ?
+				-EINVAL : 0;
+		if (ret)
+			break;
+		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+	}
+	return ret;
+}
+
+static inline int cmd_address_audit(struct parser_exec_state *s,
+		unsigned long guest_gma, int op_size, bool index_mode);
+
+static int cmd_handler_lrm(struct parser_exec_state *s)
+{
+	struct intel_gvt *gvt = s->vgpu->gvt;
+	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+	unsigned long gma;
+	int i, ret = 0;
+	int cmd_len = cmd_length(s);
+
+	for (i = 1; i < cmd_len;) {
+		if (IS_BROADWELL(gvt->dev_priv))
+			ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+		if (ret)
+			break;
+		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+		if (cmd_val(s, 0) & (1 << 22)) {
+			gma = cmd_gma(s, i + 1);
+			if (gmadr_bytes == 8)
+				gma |= (cmd_gma_hi(s, i + 2)) << 32;
+			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+		}
+		i += gmadr_dw_number(s) + 1;
+	}
+	return ret;
+}
+
+static int cmd_handler_srm(struct parser_exec_state *s)
+{
+	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	unsigned long gma;
+	int i, ret = 0;
+	int cmd_len = cmd_length(s);
+
+	for (i = 1; i < cmd_len;) {
+		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+		if (cmd_val(s, 0) & (1 << 22)) {
+			gma = cmd_gma(s, i + 1);
+			if (gmadr_bytes == 8)
+				gma |= (cmd_gma_hi(s, i + 2)) << 32;
+			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+		}
+		i += gmadr_dw_number(s) + 1;
+	}
+	return ret;
+}
+
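+/* per-ring events set pending when a scanned command requests notification */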
+struct cmd_interrupt_event {
+	int pipe_control_notify;
+	int mi_flush_dw;
+	int mi_user_interrupt;
+};
+
+static struct cmd_interrupt_event cmd_interrupt_events[] = {
+	[RCS] = {
+		.pipe_control_notify = RCS_PIPE_CONTROL,
+		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
+		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
+	},
+	[BCS] = {
+		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+		.mi_flush_dw = BCS_MI_FLUSH_DW,
+		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
+	},
+	[VCS] = {
+		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+		.mi_flush_dw = VCS_MI_FLUSH_DW,
+		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
+	},
+	[VCS2] = {
+		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+		.mi_flush_dw = VCS2_MI_FLUSH_DW,
+		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
+	},
+	[VECS] = {
+		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+		.mi_flush_dw = VECS_MI_FLUSH_DW,
+		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
+	},
+};
+
+static int cmd_handler_pipe_control(struct parser_exec_state *s)
+{
+	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	unsigned long gma;
+	bool index_mode = false;
+	unsigned int post_sync;
+	int ret = 0;
+
+	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
+
+	/* LRI post sync */
+	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
+		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
+	/* post sync */
+	else if (post_sync) {
+		if (post_sync == 2)
+			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
+		else if (post_sync == 3)
+			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
+		else if (post_sync == 1) {
+			/* check ggtt */
+			if ((cmd_val(s, 2) & (1 << 2))) {
+				gma = cmd_val(s, 2) & GENMASK(31, 3);
+				if (gmadr_bytes == 8)
+					gma |= (cmd_gma_hi(s, 3)) << 32;
+				/* Store Data Index */
+				if (cmd_val(s, 1) & (1 << 21))
+					index_mode = true;
+				ret |= cmd_address_audit(s, gma, sizeof(u64),
+						index_mode);
+			}
+		}
+	}
+
+	if (ret)
+		return ret;
+
+	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
+		set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
+				s->workload->pending_events);
+	return 0;
+}
+
+static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
+{
+	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
+			s->workload->pending_events);
+	return 0;
+}
+
+static int cmd_advance_default(struct parser_exec_state *s)
+{
+	return ip_gma_advance(s, cmd_length(s));
+}
+
+static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
+{
+	int ret;
+
+	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+		s->buf_type = BATCH_BUFFER_INSTRUCTION;
+		ret = ip_gma_set(s, s->ret_ip_gma_bb);
+		s->buf_addr_type = s->saved_buf_addr_type;
+	} else {
+		s->buf_type = RING_BUFFER_INSTRUCTION;
+		s->buf_addr_type = GTT_BUFFER;
+		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
+			s->ret_ip_gma_ring -= s->ring_size;
+		ret = ip_gma_set(s, s->ret_ip_gma_ring);
+	}
+	return ret;
+}
+
+struct mi_display_flip_command_info {
+	int pipe;
+	int plane;
+	int event;
+	i915_reg_t stride_reg;
+	i915_reg_t ctrl_reg;
+	i915_reg_t surf_reg;
+	u64 stride_val;
+	u64 tile_val;
+	u64 surf_val;
+	bool async_flip;
+};
+
+struct plane_code_mapping {
+	int pipe;
+	int plane;
+	int event;
+};
+
+static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
+		struct mi_display_flip_command_info *info)
+{
+	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct plane_code_mapping gen8_plane_code[] = {
+		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
+		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
+		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
+		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
+		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
+		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
+	};
+	u32 dword0, dword1, dword2;
+	u32 v;
+
+	dword0 = cmd_val(s, 0);
+	dword1 = cmd_val(s, 1);
+	dword2 = cmd_val(s, 2);
+
+	v = (dword0 & GENMASK(21, 19)) >> 19;
+	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+		return -EINVAL;
+
+	info->pipe = gen8_plane_code[v].pipe;
+	info->plane = gen8_plane_code[v].plane;
+	info->event = gen8_plane_code[v].event;
+	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+	info->tile_val = (dword1 & 0x1);
+	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+	if (info->plane == PLANE_A) {
+		info->ctrl_reg = DSPCNTR(info->pipe);
+		info->stride_reg = DSPSTRIDE(info->pipe);
+		info->surf_reg = DSPSURF(info->pipe);
+	} else if (info->plane == PLANE_B) {
+		info->ctrl_reg = SPRCTL(info->pipe);
+		info->stride_reg = SPRSTRIDE(info->pipe);
+		info->surf_reg = SPRSURF(info->pipe);
+	} else {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int skl_decode_mi_display_flip(struct parser_exec_state *s,
+		struct mi_display_flip_command_info *info)
+{
+	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	u32 dword0 = cmd_val(s, 0);
+	u32 dword1 = cmd_val(s, 1);
+	u32 dword2 = cmd_val(s, 2);
+	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
+
+	switch (plane) {
+	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
+		info->pipe = PIPE_A;
+		info->event = PRIMARY_A_FLIP_DONE;
+		break;
+	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
+		info->pipe = PIPE_B;
+		info->event = PRIMARY_B_FLIP_DONE;
+		break;
+	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
+		info->pipe = PIPE_C;
+		info->event = PRIMARY_C_FLIP_DONE;
+		break;
+	default:
+		gvt_err("unknown plane code %d\n", plane);
+		return -EINVAL;
+	}
+
+	info->plane = PRIMARY_PLANE;
+	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+	info->tile_val = (dword1 & GENMASK(2, 0));
+	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+	info->ctrl_reg = DSPCNTR(info->pipe);
+	info->stride_reg = DSPSTRIDE(info->pipe);
+	info->surf_reg = DSPSURF(info->pipe);
+
+	return 0;
+}
+
+static int gen8_check_mi_display_flip(struct parser_exec_state *s,
+		struct mi_display_flip_command_info *info)
+{
+	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	u32 stride, tile;
+
+	if (!info->async_flip)
+		return 0;
+
+	if (IS_SKYLAKE(dev_priv)) {
+		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+				GENMASK(12, 10)) >> 10;
+	} else {
+		stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+				GENMASK(15, 6)) >> 6;
+		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+	}
+
+	if (stride != info->stride_val)
+		gvt_dbg_cmd("cannot change stride during async flip\n");
+
+	if (tile != info->tile_val)
+		gvt_dbg_cmd("cannot change tile during async flip\n");
+
+	return 0;
+}
+
+static int gen8_update_plane_mmio_from_mi_display_flip(
+		struct parser_exec_state *s,
+		struct mi_display_flip_command_info *info)
+{
+	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct intel_vgpu *vgpu = s->vgpu;
+
+	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+		      info->surf_val << 12);
+	if (IS_SKYLAKE(dev_priv)) {
+		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+			      info->stride_val);
+		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+			      info->tile_val << 10);
+	} else {
+		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+			      info->stride_val << 6);
+		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+			      info->tile_val << 10);
+	}
+
+	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+	intel_vgpu_trigger_virtual_event(vgpu, info->event);
+	return 0;
+}
+
+static int decode_mi_display_flip(struct parser_exec_state *s,
+		struct mi_display_flip_command_info *info)
+{
+	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+	if (IS_BROADWELL(dev_priv))
+		return gen8_decode_mi_display_flip(s, info);
+	if (IS_SKYLAKE(dev_priv))
+		return skl_decode_mi_display_flip(s, info);
+
+	return -ENODEV;
+}
+
+static int check_mi_display_flip(struct parser_exec_state *s,
+		struct mi_display_flip_command_info *info)
+{
+	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+		return gen8_check_mi_display_flip(s, info);
+	return -ENODEV;
+}
+
+static int update_plane_mmio_from_mi_display_flip(
+		struct parser_exec_state *s,
+		struct mi_display_flip_command_info *info)
+{
+	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+		return gen8_update_plane_mmio_from_mi_display_flip(s, info);
+	return -ENODEV;
+}
+
+static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
+{
+	struct mi_display_flip_command_info info;
+	int ret;
+	int i;
+	int len = cmd_length(s);
+
+	ret = decode_mi_display_flip(s, &info);
+	if (ret) {
+		gvt_err("fail to decode MI display flip command\n");
+		return ret;
+	}
+
+	ret = check_mi_display_flip(s, &info);
+	if (ret) {
+		gvt_err("invalid MI display flip command\n");
+		return ret;
+	}
+
+	ret = update_plane_mmio_from_mi_display_flip(s, &info);
+	if (ret) {
+		gvt_err("fail to update plane mmio\n");
+		return ret;
+	}
+
+	for (i = 0; i < len; i++)
+		patch_value(s, cmd_ptr(s, i), MI_NOOP);
+	return 0;
+}
+
+static bool is_wait_for_flip_pending(u32 cmd)
+{
+	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
+			MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
+			MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
+			MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
+			MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
+			MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
+}
+
+static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
+{
+	u32 cmd = cmd_val(s, 0);
+
+	if (!is_wait_for_flip_pending(cmd))
+		return 0;
+
+	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
+	return 0;
+}
+
+static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
+{
+	unsigned long addr;
+	unsigned long gma_high, gma_low;
+	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+		return INTEL_GVT_INVALID_ADDR;
+
+	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
+	if (gmadr_bytes == 4) {
+		addr = gma_low;
+	} else {
+		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
+		addr = (((unsigned long)gma_high) << 32) | gma_low;
+	}
+	return addr;
+}
+
+static inline int cmd_address_audit(struct parser_exec_state *s,
+		unsigned long guest_gma, int op_size, bool index_mode)
+{
+	struct intel_vgpu *vgpu = s->vgpu;
+	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
+	int i;
+	int ret;
+
+	if (op_size > max_surface_size) {
+		gvt_err("command address audit fail name %s\n", s->info->name);
+		return -EINVAL;
+	}
+
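+	/* in index mode the operand is a QWord offset within one 4KB
+	 * page, not a graphics memory address
+	 */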
+	if (index_mode)	{
+		if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
+			ret = -EINVAL;
+			goto err;
+		}
+	} else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
+			(!vgpu_gmadr_is_valid(s->vgpu,
+					      guest_gma + op_size - 1))) {
+		ret = -EINVAL;
+		goto err;
+	}
+	return 0;
+err:
+	gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+			s->info->name, guest_gma, op_size);
+
+	pr_err("cmd dump: ");
+	for (i = 0; i < cmd_length(s); i++) {
+		if (!(i % 4))
+			pr_err("\n%08x ", cmd_val(s, i));
+		else
+			pr_err("%08x ", cmd_val(s, i));
+	}
+	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
+			vgpu->id,
+			vgpu_aperture_gmadr_base(vgpu),
+			vgpu_aperture_gmadr_end(vgpu),
+			vgpu_hidden_gmadr_base(vgpu),
+			vgpu_hidden_gmadr_end(vgpu));
+	return ret;
+}
+
+static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
+{
+	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	int op_size = (cmd_length(s) - 3) * sizeof(u32);
+	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
+	unsigned long gma, gma_low, gma_high;
+	int ret = 0;
+
+	/* check ppgtt */
+	if (!(cmd_val(s, 0) & (1 << 22)))
+		return 0;
+
+	gma = cmd_val(s, 2) & GENMASK(31, 2);
+
+	if (gmadr_bytes == 8) {
+		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
+		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+		gma = (gma_high << 32) | gma_low;
+		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
+	}
+	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
+	return ret;
+}
+
+static inline int unexpected_cmd(struct parser_exec_state *s)
+{
+	gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
+			s->vgpu->id, s->info->name);
+	return -EINVAL;
+}
+
+static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
+{
+	return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
+{
+	return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
+{
+	return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
+{
+	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+			sizeof(u32);
+	unsigned long gma, gma_high;
+	int ret = 0;
+
+	if (!(cmd_val(s, 0) & (1 << 22)))
+		return ret;
+
+	gma = cmd_val(s, 1) & GENMASK(31, 2);
+	if (gmadr_bytes == 8) {
+		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+		gma = (gma_high << 32) | gma;
+	}
+	ret = cmd_address_audit(s, gma, op_size, false);
+	return ret;
+}
+
+static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
+{
+	return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_clflush(struct parser_exec_state *s)
+{
+	return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_conditional_batch_buffer_end(
+		struct parser_exec_state *s)
+{
+	return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
+{
+	return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
+{
+	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	unsigned long gma;
+	bool index_mode = false;
+	int ret = 0;
+
+	/* Check post-sync and ppgtt bit */
+	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
+		gma = cmd_val(s, 1) & GENMASK(31, 3);
+		if (gmadr_bytes == 8)
+			gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
+		/* Store Data Index */
+		if (cmd_val(s, 0) & (1 << 21))
+			index_mode = true;
+		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+	}
+	/* Check notify bit */
+	if ((cmd_val(s, 0) & (1 << 8)))
+		set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
+				s->workload->pending_events);
+	return ret;
+}
+
+static void addr_type_update_snb(struct parser_exec_state *s)
+{
+	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
+			(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
+		s->buf_addr_type = PPGTT_BUFFER;
+	}
+}
+
+
+static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
+		unsigned long gma, unsigned long end_gma, void *va)
+{
+	unsigned long copy_len, offset;
+	unsigned long len = 0;
+	unsigned long gpa;
+
+	while (gma != end_gma) {
+		gpa = intel_vgpu_gma_to_gpa(mm, gma);
+		if (gpa == INTEL_GVT_INVALID_ADDR) {
+			gvt_err("invalid gma address: %lx\n", gma);
+			return -EFAULT;
+		}
+
+		offset = gma & (GTT_PAGE_SIZE - 1);
+
+		copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
+			GTT_PAGE_SIZE - offset : end_gma - gma;
+
+		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
+
+		len += copy_len;
+		gma += copy_len;
+	}
+	return 0;
+}
+
+
+/*
+ * Check whether a batch buffer needs to be scanned. Currently
+ * the only criterion is privilege.
+ */
+static int batch_buffer_needs_scan(struct parser_exec_state *s)
+{
+	struct intel_gvt *gvt = s->vgpu->gvt;
+
+	if (bypass_batch_buffer_scan)
+		return 0;
+
+	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+		/* BDW decides privilege based on address space */
+		if (cmd_val(s, 0) & (1 << 8))
+			return 0;
+	}
+	return 1;
+}
+
+static int find_bb_size(struct parser_exec_state *s)
+{
+	unsigned long gma = 0;
+	struct cmd_info *info;
+	uint32_t bb_size = 0;
+	uint32_t cmd_len = 0;
+	bool met_bb_end = false;
+	u32 cmd;
+
+	/* get the start gm address of the batch buffer */
+	gma = get_gma_bb_from_cmd(s, 1);
+	cmd = cmd_val(s, 0);
+
+	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+	if (info == NULL) {
+		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+				cmd, get_opcode(cmd, s->ring_id));
+		return -EINVAL;
+	}
+	do {
+		copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+				gma, gma + 4, &cmd);
+		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+		if (info == NULL) {
+			gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+				cmd, get_opcode(cmd, s->ring_id));
+			return -EINVAL;
+		}
+
+		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
+			met_bb_end = true;
+		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
+			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+				/* chained batch buffer */
+				met_bb_end = true;
+			}
+		}
+		cmd_len = get_cmd_length(info, cmd) << 2;
+		bb_size += cmd_len;
+		gma += cmd_len;
+
+	} while (!met_bb_end);
+
+	return bb_size;
+}
+
+static int perform_bb_shadow(struct parser_exec_state *s)
+{
+	struct intel_shadow_bb_entry *entry_obj;
+	unsigned long gma = 0;
+	int bb_size;
+	void *dst = NULL;
+	int ret = 0;
+
+	/* get the start gm address of the batch buffer */
+	gma = get_gma_bb_from_cmd(s, 1);
+
+	/* get the size of the batch buffer */
+	bb_size = find_bb_size(s);
+	if (bb_size < 0)
+		return -EINVAL;
+
+	/* allocate shadow batch buffer */
+	entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
+	if (entry_obj == NULL)
+		return -ENOMEM;
+
+	entry_obj->obj =
+		i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+				       roundup(bb_size, PAGE_SIZE));
+	if (IS_ERR(entry_obj->obj)) {
+		ret = PTR_ERR(entry_obj->obj);
+		goto free_entry;
+	}
+	entry_obj->len = bb_size;
+	INIT_LIST_HEAD(&entry_obj->list);
+
+	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
+	if (IS_ERR(dst)) {
+		ret = PTR_ERR(dst);
+		goto put_obj;
+	}
+
+	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
+	if (ret) {
+		gvt_err("failed to set shadow batch to CPU\n");
+		goto unmap_src;
+	}
+
+	entry_obj->va = dst;
+	entry_obj->bb_start_cmd_va = s->ip_va;
+
+	/* copy batch buffer to shadow batch buffer */
+	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+			      gma, gma + bb_size,
+			      dst);
+	if (ret) {
+		gvt_err("fail to copy guest batch buffer\n");
+		goto unmap_src;
+	}
+
+	list_add(&entry_obj->list, &s->workload->shadow_bb);
+	/*
+	 * ip_va saves the virtual address of the shadow batch buffer, while
+	 * ip_gma saves the graphics address of the original batch buffer.
+	 * As the shadow batch buffer is just a copy of the original one, it
+	 * is correct to use the shadow batch buffer's va and the original
+	 * batch buffer's gma in pair. After all, we don't want to pin the
+	 * shadow buffer here (too early).
+	 */
+	s->ip_va = dst;
+	s->ip_gma = gma;
+
+	return 0;
+
+unmap_src:
+	i915_gem_object_unpin_map(entry_obj->obj);
+put_obj:
+	i915_gem_object_put(entry_obj->obj);
+free_entry:
+	kfree(entry_obj);
+	return ret;
+}
+
+static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
+{
+	bool second_level;
+	int ret = 0;
+
+	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+		gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+		return -EINVAL;
+	}
+
+	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
+	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
+		gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+		return -EINVAL;
+	}
+
+	s->saved_buf_addr_type = s->buf_addr_type;
+	addr_type_update_snb(s);
+	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+		s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
+		s->buf_type = BATCH_BUFFER_INSTRUCTION;
+	} else if (second_level) {
+		s->buf_type = BATCH_BUFFER_2ND_LEVEL;
+		s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
+		s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
+	}
+
+	if (batch_buffer_needs_scan(s)) {
+		ret = perform_bb_shadow(s);
+		if (ret < 0)
+			gvt_err("invalid shadow batch buffer\n");
+	} else {
+		/* emulate a batch buffer end to return correctly */
+		ret = cmd_handler_mi_batch_buffer_end(s);
+		if (ret < 0)
+			return ret;
+	}
+
+	return ret;
+}
+
+static struct cmd_info cmd_info[] = {
+	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
+		0, 1, NULL},
+
+	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
+		0, 1, cmd_handler_mi_user_interrupt},
+
+	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
+		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
+
+	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+		NULL},
+
+	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+		NULL},
+
+	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+		NULL},
+
+	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+		NULL},
+
+	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
+		D_ALL, 0, 1, NULL},
+
+	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
+		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+		cmd_handler_mi_batch_buffer_end},
+
+	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
+		0, 1, NULL},
+
+	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+		NULL},
+
+	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
+		D_ALL, 0, 1, NULL},
+
+	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+		NULL},
+
+	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+		NULL},
+
+	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
+
+	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
+		0, 8, NULL},
+
+	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
+
+	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+		ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+
+	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
+
+	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
+		0, 8, cmd_handler_mi_store_data_index},
+
+	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
+		D_ALL, 0, 8, cmd_handler_lri},
+
+	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
+		cmd_handler_mi_update_gtt},
+
+	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
+		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+
+	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
+		cmd_handler_mi_flush_dw},
+
+	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
+		10, cmd_handler_mi_clflush},
+
+	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
+		D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+
+	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
+		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+
+	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
+		D_ALL, 0, 8, cmd_handler_lrr},
+
+	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
+		D_ALL, 0, 8, NULL},
+
+	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
+		ADDR_FIX_1(2), 8, NULL},
+
+	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
+		ADDR_FIX_1(2), 8, NULL},
+
+	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
+		8, cmd_handler_mi_op_2e},
+
+	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
+		8, cmd_handler_mi_op_2f},
+
+	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
+		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
+		cmd_handler_mi_batch_buffer_start},
+
+	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
+		F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+		cmd_handler_mi_conditional_batch_buffer_end},
+
+	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
+		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
+
+	{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		ADDR_FIX_2(4, 7), 8, NULL},
+
+	{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		0, 8, NULL},
+
+	{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
+		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+	{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+	{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		0, 8, NULL},
+
+	{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		ADDR_FIX_1(3), 8, NULL},
+
+	{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
+		D_ALL, 0, 8, NULL},
+
+	{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		ADDR_FIX_1(4), 8, NULL},
+
+	{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		ADDR_FIX_2(4, 5), 8, NULL},
+
+	{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		ADDR_FIX_1(4), 8, NULL},
+
+	{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		ADDR_FIX_2(4, 7), 8, NULL},
+
+	{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
+		D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+	{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+	{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
+		D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
+
+	{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
+		R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+	{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
+		OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
+		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+	{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
+		D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+	{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
+		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+	{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
+		D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+	{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
+		D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+	{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
+		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+	{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
+		OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
+		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+	{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
+		ADDR_FIX_2(4, 5), 8, NULL},
+
+	{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
+		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+	{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
+		OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
+		OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_BLEND_STATE_POINTERS",
+		OP_3DSTATE_BLEND_STATE_POINTERS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
+		OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_BINDING_TABLE_POINTERS_VS",
+		OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_BINDING_TABLE_POINTERS_HS",
+		OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_BINDING_TABLE_POINTERS_DS",
+		OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_BINDING_TABLE_POINTERS_GS",
+		OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_BINDING_TABLE_POINTERS_PS",
+		OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
+		OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
+		OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
+		OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
+		OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
+		OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
+		0, 8, NULL},
+
+	{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
+		0, 8, NULL},
+
+	{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
+		0, 8, NULL},
+
+	{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
+		0, 8, NULL},
+
+	{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+	{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+	{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+	{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+	{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+	{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+	{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+	{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+		NULL},
+
+	{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+		8, NULL},
+
+	{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
+		R_RCS, D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+		8, NULL},
+
+	{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+		NULL},
+
+	{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+		NULL},
+
+	{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+		NULL},
+
+	{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
+		R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
+
+	{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
+		R_RCS, D_ALL, 0, 1, NULL},
+
+	{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
+		R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
+		D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
+		D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
+		R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
+		0, 8, NULL},
+
+	{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
+		D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+	{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
+		D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
+		D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
+		D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
+		D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+	{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
+		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+	{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
+		R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
+		R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
+		D_ALL, 0, 9, NULL},
+
+	{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+		ADDR_FIX_2(2, 4), 8, NULL},
+
+	{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
+		OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
+		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+	{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
+		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+	{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
+		OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
+		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+	{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
+		D_BDW_PLUS, 0, 8, NULL},
+
+	{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
+		ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
+
+	{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
+		1, NULL},
+
+	{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
+		ADDR_FIX_1(1), 8, NULL},
+
+	{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+		ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
+
+	{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
+		ADDR_FIX_1(1), 8, NULL},
+
+	{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+	{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+		0, 8, NULL},
+
+	{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
+		D_SKL_PLUS, 0, 8, NULL},
+
+	{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
+		F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+	{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
+		0, 16, NULL},
+
+	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
+		0, 16, NULL},
+
+	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
+		0, 16, NULL},
+
+	{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
+		0, 16, NULL},
+
+	{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+		0, 16, NULL},
+
+	{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+		0, 8, NULL},
+
+	{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
+		NULL},
+
+	{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
+		F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+	{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
+		R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+	{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
+		F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+	{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
+		F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
+
+	{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+	{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 6, NULL},
+
+	{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+	{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
+		R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
+
+	{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
+		0, 16, NULL},
+
+	{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+	{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+	{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
+		R_VCS, D_ALL, 0, 12, NULL},
+
+	{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
+
+	{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
+		0, 12, NULL},
+
+	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
+		0, 20, NULL},
+};
+
+static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
+{
+	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
+}
+
+#define GVT_MAX_CMD_LENGTH     20  /* In Dword */
+
+static void trace_cs_command(struct parser_exec_state *s,
+		cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
+{
+	/* This buffer is used by ftrace to store all commands copied from
+	 * guest gma space. Commands can sometimes cross page boundaries,
+	 * which should not be handled in the ftrace logic, so this is just
+	 * used as a 'bounce buffer'.
+	 */
+	u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
+	int i;
+	u32 cmd_len = cmd_length(s);
+	/* The chosen value of GVT_MAX_CMD_LENGTH is just based on the
+	 * following two considerations:
+	 * 1) From observation, most common ring commands are not that long.
+	 *    But there are exceptions. So it indeed makes sense to observe
+	 *    longer commands.
+	 * 2) From the performance and debugging point of view, dumping the
+	 *    full contents of very long commands is not necessary.
+	 * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
+	 * the future for performance considerations.
+	 */
+	if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
+		gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
+		cmd_len = GVT_MAX_CMD_LENGTH;
+	}
+
+	for (i = 0; i < cmd_len; i++)
+		cmd_trace_buf[i] = cmd_val(s, i);
+
+	trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
+			cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
+			cost_pre_cmd_handler, cost_cmd_handler);
+}
+
+/* call the cmd handler, and advance ip */
+static int cmd_parser_exec(struct parser_exec_state *s)
+{
+	struct cmd_info *info;
+	u32 cmd;
+	int ret = 0;
+	cycles_t t0, t1, t2;
+	struct parser_exec_state s_before_advance_custom;
+
+	t0 = get_cycles();
+
+	cmd = cmd_val(s, 0);
+
+	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+	if (info == NULL) {
+		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+				cmd, get_opcode(cmd, s->ring_id));
+		return -EINVAL;
+	}
+
+	gvt_dbg_cmd("%s\n", info->name);
+
+	s->info = info;
+
+	t1 = get_cycles();
+
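+	/* snapshot the parser state so the trace below reports the command
+	 * at its original IP, even when an F_IP_ADVANCE_CUSTOM handler
+	 * advances it
+	 */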
+	memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+
+	if (info->handler) {
+		ret = info->handler(s);
+		if (ret < 0) {
+			gvt_err("%s handler error\n", info->name);
+			return ret;
+		}
+	}
+	t2 = get_cycles();
+
+	trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
+
+	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
+		ret = cmd_advance_default(s);
+		if (ret) {
+			gvt_err("%s IP advance error\n", info->name);
+			return ret;
+		}
+	}
+	return 0;
+}
+
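+/*
+ * The ring buffer is circular: when tail >= head the valid region is the
+ * single span [head, tail]; otherwise it wraps past the buffer end and
+ * only (tail, head) lies outside the submitted range.
+ */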
+static inline bool gma_out_of_range(unsigned long gma,
+		unsigned long gma_head, unsigned int gma_tail)
+{
+	if (gma_tail >= gma_head)
+		return (gma < gma_head) || (gma > gma_tail);
+	else
+		return (gma > gma_tail) && (gma < gma_head);
+}
+
+static int command_scan(struct parser_exec_state *s,
+		unsigned long rb_head, unsigned long rb_tail,
+		unsigned long rb_start, unsigned long rb_len)
+{
+	unsigned long gma_head, gma_tail, gma_bottom;
+	int ret = 0;
+
+	gma_head = rb_start + rb_head;
+	gma_tail = rb_start + rb_tail;
+	gma_bottom = rb_start +  rb_len;
+
+	gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
+
+	while (s->ip_gma != gma_tail) {
+		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+			if (!(s->ip_gma >= rb_start) ||
+				!(s->ip_gma < gma_bottom)) {
+				gvt_err("ip_gma %lx out of ring scope."
+					"(base:0x%lx, bottom: 0x%lx)\n",
+					s->ip_gma, rb_start,
+					gma_bottom);
+				parser_exec_state_dump(s);
+				return -EINVAL;
+			}
+			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
+				gvt_err("ip_gma %lx out of range."
+					"base 0x%lx head 0x%lx tail 0x%lx\n",
+					s->ip_gma, rb_start,
+					rb_head, rb_tail);
+				parser_exec_state_dump(s);
+				break;
+			}
+		}
+		ret = cmd_parser_exec(s);
+		if (ret) {
+			gvt_err("cmd parser error\n");
+			parser_exec_state_dump(s);
+			break;
+		}
+	}
+
+	gvt_dbg_cmd("scan_end\n");
+
+	return ret;
+}
+
+static int scan_workload(struct intel_vgpu_workload *workload)
+{
+	unsigned long gma_head, gma_tail, gma_bottom;
+	struct parser_exec_state s;
+	int ret = 0;
+
+	/* ring base is page aligned */
+	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+		return -EINVAL;
+
+	gma_head = workload->rb_start + workload->rb_head;
+	gma_tail = workload->rb_start + workload->rb_tail;
+	gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+	s.buf_type = RING_BUFFER_INSTRUCTION;
+	s.buf_addr_type = GTT_BUFFER;
+	s.vgpu = workload->vgpu;
+	s.ring_id = workload->ring_id;
+	s.ring_start = workload->rb_start;
+	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+	s.ring_head = gma_head;
+	s.ring_tail = gma_tail;
+	s.rb_va = workload->shadow_ring_buffer_va;
+	s.workload = workload;
+
+	if (bypass_scan_mask & (1 << workload->ring_id))
+		return 0;
+
+	ret = ip_gma_set(&s, gma_head);
+	if (ret)
+		goto out;
+
+	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
+		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
+
+out:
+	return ret;
+}
+
+static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
+	struct parser_exec_state s;
+	int ret = 0;
+
+	/* ring base is page aligned */
+	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+		return -EINVAL;
+
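+	/* scan past the 3-dword MI_BATCH_BUFFER_START that combine_wa_ctx()
+	 * appends right after the indirect context image
+	 */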
+	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
+			PAGE_SIZE);
+	gma_head = wa_ctx->indirect_ctx.guest_gma;
+	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
+	gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
+
+	s.buf_type = RING_BUFFER_INSTRUCTION;
+	s.buf_addr_type = GTT_BUFFER;
+	s.vgpu = wa_ctx->workload->vgpu;
+	s.ring_id = wa_ctx->workload->ring_id;
+	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
+	s.ring_size = ring_size;
+	s.ring_head = gma_head;
+	s.ring_tail = gma_tail;
+	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
+	s.workload = wa_ctx->workload;
+
+	ret = ip_gma_set(&s, gma_head);
+	if (ret)
+		goto out;
+
+	ret = command_scan(&s, 0, ring_tail,
+		wa_ctx->indirect_ctx.guest_gma, ring_size);
+out:
+	return ret;
+}
+
+static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+	struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
+	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
+	unsigned int copy_len = 0;
+	int ret;
+
+	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+	/* calculate workload ring buffer size; the modulo handles the
+	 * wrapped (tail < head) case
+	 */
+	workload->rb_len = (workload->rb_tail + guest_rb_size -
+			workload->rb_head) % guest_rb_size;
+
+	gma_head = workload->rb_start + workload->rb_head;
+	gma_tail = workload->rb_start + workload->rb_tail;
+	gma_top = workload->rb_start + guest_rb_size;
+
+	/* allocate shadow ring buffer */
+	ret = intel_ring_begin(workload->req, workload->rb_len / 4);
+	if (ret)
+		return ret;
+
+	/* get shadow ring buffer va */
+	workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+
+	/* head > tail --> copy head <-> top */
+	if (gma_head > gma_tail) {
+		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+				gma_head, gma_top,
+				workload->shadow_ring_buffer_va);
+		if (ret) {
+			gvt_err("fail to copy guest ring buffer\n");
+			return ret;
+		}
+		copy_len = gma_top - gma_head;
+		gma_head = workload->rb_start;
+	}
+
+	/* copy from head (or from the ring start after a wrap) up to tail */
+	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+			gma_head, gma_tail,
+			workload->shadow_ring_buffer_va + copy_len);
+	if (ret) {
+		gvt_err("fail to copy guest ring buffer\n");
+		return ret;
+	}
+	ring->tail += workload->rb_len;
+	intel_ring_advance(ring);
+	return 0;
+}
+
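+/**
+ * intel_gvt_scan_and_shadow_workload - copy and audit a guest ring buffer
+ * @workload: the workload whose ring buffer will be shadowed and scanned
+ *
+ * Copy the guest ring buffer content into the shadow ring buffer, then run
+ * the command scanner over the copy.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */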
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+{
+	int ret;
+
+	ret = shadow_workload_ring_buffer(workload);
+	if (ret) {
+		gvt_err("fail to shadow workload ring_buffer\n");
+		return ret;
+	}
+
+	ret = scan_workload(workload);
+	if (ret) {
+		gvt_err("scan workload error\n");
+		return ret;
+	}
+	return 0;
+}
+
+static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
+	int ctx_size = wa_ctx->indirect_ctx.size;
+	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+	void *map;
+
+	obj = i915_gem_object_create(dev,
+				     roundup(ctx_size + CACHELINE_BYTES,
+					     PAGE_SIZE));
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	/* get the va of the shadow batch buffer */
+	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(map)) {
+		gvt_err("failed to vmap shadow indirect ctx\n");
+		ret = PTR_ERR(map);
+		goto put_obj;
+	}
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, false);
+	if (ret) {
+		gvt_err("failed to set shadow indirect ctx to CPU\n");
+		goto unmap_src;
+	}
+
+	ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
+				wa_ctx->workload->vgpu->gtt.ggtt_mm,
+				guest_gma, guest_gma + ctx_size,
+				map);
+	if (ret) {
+		gvt_err("fail to copy guest indirect ctx\n");
+		goto unmap_src;
+	}
+
+	wa_ctx->indirect_ctx.obj = obj;
+	wa_ctx->indirect_ctx.shadow_va = map;
+	return 0;
+
+unmap_src:
+	i915_gem_object_unpin_map(obj);
+put_obj:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+	unsigned char *bb_start_sva;
+
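+	/* 0x18800001 is an MI_BATCH_BUFFER_START dword (opcode 0x31 << 23,
+	 * length field 1, i.e. a 3-dword command on gen8+) chaining
+	 * execution into the guest per-context buffer
+	 */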
+	per_ctx_start[0] = 0x18800001;
+	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
+
+	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+				wa_ctx->indirect_ctx.size;
+
+	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
+
+	return 0;
+}
+
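+/**
+ * intel_gvt_scan_and_shadow_wa_ctx - shadow and scan a workaround context
+ * @wa_ctx: the workaround context to be shadowed
+ *
+ * Shadow the indirect context buffer, chain in the per-context buffer
+ * start command, then run the command scanner over the result.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */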
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	int ret;
+
+	if (wa_ctx->indirect_ctx.size == 0)
+		return 0;
+
+	ret = shadow_indirect_ctx(wa_ctx);
+	if (ret) {
+		gvt_err("fail to shadow indirect ctx\n");
+		return ret;
+	}
+
+	combine_wa_ctx(wa_ctx);
+
+	ret = scan_wa_ctx(wa_ctx);
+	if (ret) {
+		gvt_err("scan wa ctx error\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+		unsigned int opcode, int rings)
+{
+	struct cmd_info *info = NULL;
+	unsigned int ring;
+
+	for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+		info = find_cmd_entry(gvt, opcode, ring);
+		if (info)
+			break;
+	}
+	return info;
+}
+
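+/*
+ * Build the opcode hash table from cmd_info[]. Entries that do not match
+ * the current device type are skipped, and a duplicated opcode on the
+ * same rings is treated as an error.
+ */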
+static int init_cmd_table(struct intel_gvt *gvt)
+{
+	int i;
+	struct cmd_entry *e;
+	struct cmd_info	*info;
+	unsigned int gen_type;
+
+	gen_type = intel_gvt_get_device_type(gvt);
+
+	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+		if (!(cmd_info[i].devices & gen_type))
+			continue;
+
+		e = kzalloc(sizeof(*e), GFP_KERNEL);
+		if (!e)
+			return -ENOMEM;
+
+		e->info = &cmd_info[i];
+		info = find_cmd_entry_any_ring(gvt,
+				e->info->opcode, e->info->rings);
+		if (info) {
+			gvt_err("%s %s duplicated\n", e->info->name,
+					info->name);
+			return -EEXIST;
+		}
+
+		INIT_HLIST_NODE(&e->hlist);
+		add_cmd_entry(gvt, e);
+		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
+				e->info->name, e->info->opcode, e->info->flag,
+				e->info->devices, e->info->rings);
+	}
+	return 0;
+}
+
+static void clean_cmd_table(struct intel_gvt *gvt)
+{
+	struct hlist_node *tmp;
+	struct cmd_entry *e;
+	int i;
+
+	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
+		kfree(e);
+
+	hash_init(gvt->cmd_table);
+}
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
+{
+	clean_cmd_table(gvt);
+}
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
+{
+	int ret;
+
+	ret = init_cmd_table(gvt);
+	if (ret) {
+		intel_gvt_clean_cmd_parser(gvt);
+		return ret;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
new file mode 100644
index 0000000..bed3351
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Ke Yu
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Yulei Zhang <yulei.zhang@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+#ifndef _GVT_CMD_PARSER_H_
+#define _GVT_CMD_PARSER_H_
+
+#define GVT_CMD_HASH_BITS 7
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 7ef412b..68cba7b 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -24,11 +24,34 @@
 #ifndef __GVT_DEBUG_H__
 #define __GVT_DEBUG_H__
 
+#define gvt_err(fmt, args...) \
+	DRM_ERROR("gvt: "fmt, ##args)
+
 #define gvt_dbg_core(fmt, args...) \
 	DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
 
-/*
- * Other GVT debug stuff will be introduced in the GVT device model patches.
- */
+#define gvt_dbg_irq(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+
+#define gvt_dbg_mm(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+
+#define gvt_dbg_mmio(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+
+#define gvt_dbg_dpy(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+
+#define gvt_dbg_el(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+
+#define gvt_dbg_sched(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+
+#define gvt_dbg_render(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+
+#define gvt_dbg_cmd(fmt, args...) \
+	DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
new file mode 100644
index 0000000..c0c884a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Ke Yu
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Terrence Xu <terrence.xu@intel.com>
+ *    Changbin Du <changbin.du@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int get_edp_pipe(struct intel_vgpu *vgpu)
+{
+	u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
+	int pipe = -1;
+
+	switch (data & TRANS_DDI_EDP_INPUT_MASK) {
+	case TRANS_DDI_EDP_INPUT_A_ON:
+	case TRANS_DDI_EDP_INPUT_A_ONOFF:
+		pipe = PIPE_A;
+		break;
+	case TRANS_DDI_EDP_INPUT_B_ONOFF:
+		pipe = PIPE_B;
+		break;
+	case TRANS_DDI_EDP_INPUT_C_ONOFF:
+		pipe = PIPE_C;
+		break;
+	}
+	return pipe;
+}
+
+static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+		return 0;
+
+	if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
+		return 0;
+	return 1;
+}
+
+static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+		return -EINVAL;
+
+	if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+		return 1;
+
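+	/* a pipe routed through the eDP transcoder can be enabled even when
+	 * its own PIPECONF enable bit is not set
+	 */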
+	if (edp_pipe_is_enabled(vgpu) &&
+			get_edp_pipe(vgpu) == pipe)
+		return 1;
+	return 0;
+}
+
+/* EDID with 1024x768 as its resolution */
+static unsigned char virtual_dp_monitor_edid[] = {
+	/* Header */
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	/* Vendor & Product Identification */
+	0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+	/* Version & Revision */
+	0x01, 0x04,
+	/* Basic Display Parameters & Features */
+	0xa5, 0x34, 0x20, 0x78, 0x23,
+	/* Color Characteristics */
+	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+	/* Established Timings: maximum resolution is 1024x768 */
+	0x21, 0x08, 0x00,
+	/* Standard Timings. All invalid */
+	0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+	0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+	/* 18 Byte Data Blocks 1: invalid */
+	0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+	/* 18 Byte Data Blocks 2: invalid */
+	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+	0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	/* 18 Byte Data Blocks 3: invalid */
+	0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+	0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+	/* 18 Byte Data Blocks 4: invalid */
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+	0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+	/* Extension Block Count */
+	0x00,
+	/* Checksum */
+	0xef,
+};
+
+#define DPCD_HEADER_SIZE        0xb
+
+static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
+	0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
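+/* Reflect the configured virtual monitors to the guest by setting the
+ * matching hot-plug live-state bits in the virtual interrupt status
+ * registers.
+ */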
+static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+			SDE_PORTC_HOTPLUG_CPT |
+			SDE_PORTD_HOTPLUG_CPT);
+
+	if (IS_SKYLAKE(dev_priv))
+		vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+				SDE_PORTE_HOTPLUG_SPT);
+
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+
+	if (IS_SKYLAKE(dev_priv) &&
+			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
+		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+	}
+
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+		if (IS_BROADWELL(dev_priv))
+			vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+				GEN8_PORT_DP_A_HOTPLUG;
+		else
+			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+	}
+}
+
+static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
+{
+	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+	kfree(port->edid);
+	port->edid = NULL;
+
+	kfree(port->dpcd);
+	port->dpcd = NULL;
+}
+
+static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
+		int type)
+{
+	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+	port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
+	if (!port->edid)
+		return -ENOMEM;
+
+	port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
+	if (!port->dpcd) {
+		kfree(port->edid);
+		return -ENOMEM;
+	}
+
+	memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+			EDID_SIZE);
+	port->edid->data_valid = true;
+
+	memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
+	port->dpcd->data_valid = true;
+	port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
+	port->type = type;
+
+	emulate_monitor_status_change(vgpu);
+	return 0;
+}
+
+/**
+ * intel_gvt_check_vblank_emulation - check if vblank emulation timer should
+ * be turned on/off when a virtual pipe is enabled/disabled.
+ * @gvt: a GVT device
+ *
+ * This function is used to turn on/off vblank timer according to currently
+ * enabled/disabled virtual pipes.
+ *
+ */
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
+{
+	struct intel_gvt_irq *irq = &gvt->irq;
+	struct intel_vgpu *vgpu;
+	bool have_enabled_pipe = false;
+	int pipe, id;
+
+	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+		return;
+
+	hrtimer_cancel(&irq->vblank_timer.timer);
+
+	for_each_active_vgpu(gvt, vgpu, id) {
+		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
+			if (pipe_is_enabled(vgpu, pipe)) {
+				have_enabled_pipe = true;
+				break;
+			}
+		}
+	}
+
+	if (have_enabled_pipe)
+		hrtimer_start(&irq->vblank_timer.timer,
+			ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+			HRTIMER_MODE_ABS);
+}
+
+static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_irq *irq = &vgpu->irq;
+	int vblank_event[] = {
+		[PIPE_A] = PIPE_A_VBLANK,
+		[PIPE_B] = PIPE_B_VBLANK,
+		[PIPE_C] = PIPE_C_VBLANK,
+	};
+	int event;
+
+	if (pipe < PIPE_A || pipe > PIPE_C)
+		return;
+
+	for_each_set_bit(event, irq->flip_done_event[pipe],
+			INTEL_GVT_EVENT_MAX) {
+		clear_bit(event, irq->flip_done_event[pipe]);
+		if (!pipe_is_enabled(vgpu, pipe))
+			continue;
+
+		vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+		intel_vgpu_trigger_virtual_event(vgpu, event);
+	}
+
+	if (pipe_is_enabled(vgpu, pipe)) {
+		vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+		intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
+	}
+}
+
+static void emulate_vblank(struct intel_vgpu *vgpu)
+{
+	int pipe;
+
+	for_each_pipe(vgpu->gvt->dev_priv, pipe)
+		emulate_vblank_on_pipe(vgpu, pipe);
+}
+
+/**
+ * intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
+ * @gvt: a GVT device
+ *
+ * This function is used to trigger vblank interrupts for vGPUs on GVT device
+ *
+ */
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
+{
+	struct intel_vgpu *vgpu;
+	int id;
+
+	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+		return;
+
+	for_each_active_vgpu(gvt, vgpu, id)
+		emulate_vblank(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_display - clean vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean vGPU virtual display emulation stuffs
+ *
+ */
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	if (IS_SKYLAKE(dev_priv))
+		clean_virtual_dp_monitor(vgpu, PORT_D);
+	else
+		clean_virtual_dp_monitor(vgpu, PORT_B);
+}
+
+/**
+ * intel_vgpu_init_display- initialize vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize vGPU virtual display emulation stuffs
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	intel_vgpu_init_i2c_edid(vgpu);
+
+	if (IS_SKYLAKE(dev_priv))
+		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+	else
+		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
new file mode 100644
index 0000000..7a60cb8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Ke Yu
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Terrence Xu <terrence.xu@intel.com>
+ *    Changbin Du <changbin.du@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_DISPLAY_H_
+#define _GVT_DISPLAY_H_
+
+#define SBI_REG_MAX	20
+
+#define intel_vgpu_port(vgpu, port) \
+	(&(vgpu->display.ports[port]))
+
+#define intel_vgpu_has_monitor_on_port(vgpu, port) \
+	(intel_vgpu_port(vgpu, port)->edid && \
+		intel_vgpu_port(vgpu, port)->edid->data_valid)
+
+#define intel_vgpu_port_is_dp(vgpu, port) \
+	((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
+	(intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
+	(intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
+	(intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
+
+#define INTEL_GVT_MAX_UEVENT_VARS	3
+
+/* DPCD start */
+#define DPCD_SIZE	0x700
+
+/* DPCD */
+#define DP_SET_POWER            0x600
+#define DP_SET_POWER_D0         0x1
+#define AUX_NATIVE_WRITE        0x8
+#define AUX_NATIVE_READ         0x9
+
+#define AUX_NATIVE_REPLY_MASK   (0x3 << 4)
+#define AUX_NATIVE_REPLY_ACK    (0x0 << 4)
+#define AUX_NATIVE_REPLY_NAK    (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER  (0x2 << 4)
+
+#define AUX_BURST_SIZE          16
+
+/* DPCD addresses */
+#define DPCD_REV			0x000
+#define DPCD_MAX_LINK_RATE		0x001
+#define DPCD_MAX_LANE_COUNT		0x002
+
+#define DPCD_TRAINING_PATTERN_SET	0x102
+#define	DPCD_SINK_COUNT			0x200
+#define DPCD_LANE0_1_STATUS		0x202
+#define DPCD_LANE2_3_STATUS		0x203
+#define DPCD_LANE_ALIGN_STATUS_UPDATED	0x204
+#define DPCD_SINK_STATUS		0x205
+
+/* link training */
+#define DPCD_TRAINING_PATTERN_SET_MASK	0x03
+#define DPCD_LINK_TRAINING_DISABLED	0x00
+#define DPCD_TRAINING_PATTERN_1		0x01
+#define DPCD_TRAINING_PATTERN_2		0x02
+
+#define DPCD_CP_READY_MASK		(1 << 6)
+
+/* lane status */
+#define DPCD_LANES_CR_DONE		0x11
+#define DPCD_LANES_EQ_DONE		0x22
+#define DPCD_SYMBOL_LOCKED		0x44
+
+#define DPCD_INTERLANE_ALIGN_DONE	0x01
+
+#define DPCD_SINK_IN_SYNC		0x03
+/* DPCD end */
+
+#define SBI_RESPONSE_MASK               0x3
+#define SBI_RESPONSE_SHIFT              0x1
+#define SBI_STAT_MASK                   0x1
+#define SBI_STAT_SHIFT                  0x0
+#define SBI_OPCODE_SHIFT                8
+#define SBI_OPCODE_MASK			(0xff << SBI_OPCODE_SHIFT)
+#define SBI_CMD_IORD                    2
+#define SBI_CMD_IOWR                    3
+#define SBI_CMD_CRRD                    6
+#define SBI_CMD_CRWR                    7
+#define SBI_ADDR_OFFSET_SHIFT           16
+#define SBI_ADDR_OFFSET_MASK            (0xffff << SBI_ADDR_OFFSET_SHIFT)
+
+struct intel_vgpu_sbi_register {
+	unsigned int offset;
+	u32 value;
+};
+
+struct intel_vgpu_sbi {
+	int number;
+	struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
+};
+
+enum intel_gvt_plane_type {
+	PRIMARY_PLANE = 0,
+	CURSOR_PLANE,
+	SPRITE_PLANE,
+	MAX_PLANE
+};
+
+struct intel_vgpu_dpcd_data {
+	bool data_valid;
+	u8 data[DPCD_SIZE];
+};
+
+enum intel_vgpu_port_type {
+	GVT_CRT = 0,
+	GVT_DP_A,
+	GVT_DP_B,
+	GVT_DP_C,
+	GVT_DP_D,
+	GVT_HDMI_B,
+	GVT_HDMI_C,
+	GVT_HDMI_D,
+	GVT_PORT_MAX
+};
+
+struct intel_vgpu_port {
+	/* per display EDID information */
+	struct intel_vgpu_edid_data *edid;
+	/* per display DPCD information */
+	struct intel_vgpu_dpcd_data *dpcd;
+	int type;
+};
+
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
+
+int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
new file mode 100644
index 0000000..7e1da1c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Ke Yu
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Terrence Xu <terrence.xu@intel.com>
+ *    Changbin Du <changbin.du@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GMBUS1_TOTAL_BYTES_SHIFT 16
+#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
+#define gmbus1_total_byte_count(v) (((v) >> \
+	GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
+#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
+#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
+#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
+
+/* GMBUS0 bits definitions */
+#define _GMBUS_PIN_SEL_MASK     (0x7)
+
+static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+	unsigned char chr = 0;
+
+	if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
+		gvt_err("Driver tries to read EDID without proper sequence!\n");
+		return 0;
+	}
+	if (edid->current_edid_read >= EDID_SIZE) {
+		gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+		return 0;
+	}
+
+	if (!edid->edid_available) {
+		gvt_err("Reading EDID but EDID is not available!\n");
+		return 0;
+	}
+
+	if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
+		struct intel_vgpu_edid_data *edid_data =
+			intel_vgpu_port(vgpu, edid->port)->edid;
+
+		chr = edid_data->edid_block[edid->current_edid_read];
+		edid->current_edid_read++;
+	} else {
+		gvt_err("No EDID available during the reading?\n");
+	}
+	return chr;
+}
+
+static inline int get_port_from_gmbus0(u32 gmbus0)
+{
+	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+	int port = -EINVAL;
+
+	if (port_select == 2)
+		port = PORT_E;
+	else if (port_select == 4)
+		port = PORT_C;
+	else if (port_select == 5)
+		port = PORT_B;
+	else if (port_select == 6)
+		port = PORT_D;
+	return port;
+}
+
+static void reset_gmbus_controller(struct intel_vgpu *vgpu)
+{
+	vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+	if (!vgpu->display.i2c_edid.edid_available)
+		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+}
+
+/* GMBUS0 */
+static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
+			unsigned int offset, void *p_data, unsigned int bytes)
+{
+	int port, pin_select;
+
+	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+
+	pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;
+
+	intel_vgpu_init_i2c_edid(vgpu);
+
+	if (pin_select == 0)
+		return 0;
+
+	port = get_port_from_gmbus0(pin_select);
+	if (WARN_ON(port < 0))
+		return 0;
+
+	vgpu->display.i2c_edid.state = I2C_GMBUS;
+	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+
+	vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+	vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+
+	if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
+			!intel_vgpu_port_is_dp(vgpu, port)) {
+		vgpu->display.i2c_edid.port = port;
+		vgpu->display.i2c_edid.edid_available = true;
+		vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+	} else
+		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+	return 0;
+}
+
+static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+	u32 slave_addr;
+	u32 wvalue = *(u32 *)p_data;
+
+	if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
+		if (!(wvalue & GMBUS_SW_CLR_INT)) {
+			vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
+			reset_gmbus_controller(vgpu);
+		}
+		/*
+		 * TODO: "This bit is cleared to zero when an event
+		 * causes the HW_RDY bit transition to occur "
+		 */
+	} else {
+		/*
+		 * per bspec setting this bit can cause:
+		 * 1) INT status bit cleared
+		 * 2) HW_RDY bit asserted
+		 */
+		if (wvalue & GMBUS_SW_CLR_INT) {
+			vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+			vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+		}
+
+		/* For virtualization, we assume that HW is always ready,
+		 * so GMBUS_SW_RDY should always be cleared
+		 */
+		if (wvalue & GMBUS_SW_RDY)
+			wvalue &= ~GMBUS_SW_RDY;
+
+		i2c_edid->gmbus.total_byte_count =
+			gmbus1_total_byte_count(wvalue);
+		slave_addr = gmbus1_slave_addr(wvalue);
+
+		/* vgpu gmbus only supports EDID */
+		if (slave_addr == EDID_ADDR) {
+			i2c_edid->slave_selected = true;
+		} else if (slave_addr != 0) {
+			gvt_dbg_dpy(
+				"vgpu%d: unsupported gmbus slave addr(0x%x)\n"
+				"	gmbus operations will be ignored.\n",
+					vgpu->id, slave_addr);
+		}
+
+		if (wvalue & GMBUS_CYCLE_INDEX)
+			i2c_edid->current_edid_read =
+				gmbus1_slave_index(wvalue);
+
+		i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
+		switch (gmbus1_bus_cycle(wvalue)) {
+		case GMBUS_NOCYCLE:
+			break;
+		case GMBUS_STOP:
+			/* From spec:
+			 * This can only cause a STOP to be generated
+			 * if a GMBUS cycle is generated, the GMBUS is
+			 * currently in a data/wait/idle phase, or it is in a
+			 * WAIT phase
+			 */
+			if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
+				!= GMBUS_NOCYCLE) {
+				intel_vgpu_init_i2c_edid(vgpu);
+				/* After the 'stop' cycle, hw state would become
+				 * 'stop phase' and then 'idle phase' after a
+				 * few milliseconds. In emulation, we just set
+				 * it as 'idle phase' ('stop phase' is not
+				 * visible in gmbus interface)
+				 */
+				i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+				vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+			}
+			break;
+		case NIDX_NS_W:
+		case IDX_NS_W:
+		case NIDX_STOP:
+		case IDX_STOP:
+			/* From the hw spec, the GMBUS phase
+			 * transitions like this:
+			 * START (-->INDEX) -->DATA
+			 */
+			i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
+			vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+			break;
+		default:
+			gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+			break;
+		}
+		/*
+		 * From hw spec the WAIT state will be
+		 * cleared:
+		 * (1) in a new GMBUS cycle
+		 * (2) by generating a stop
+		 */
+		vgpu_vreg(vgpu, offset) = wvalue;
+	}
+	return 0;
+}
+
+static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	WARN_ON(1);
+	return 0;
+}
+
+static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	int i;
+	unsigned char byte_data;
+	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+	int byte_left = i2c_edid->gmbus.total_byte_count -
+				i2c_edid->current_edid_read;
+	int byte_count = byte_left;
+	u32 reg_data = 0;
+
+	/* Data can only be received if the previous settings are correct */
+	if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+		if (byte_left <= 0) {
+			memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+			return 0;
+		}
+
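+		/* the GMBUS3 data register carries at most 4 bytes per read;
+		 * pack the EDID bytes little-endian, lowest byte first
+		 */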
+		if (byte_count > 4)
+			byte_count = 4;
+		for (i = 0; i < byte_count; i++) {
+			byte_data = edid_get_byte(vgpu);
+			reg_data |= (byte_data << (i << 3));
+		}
+
+		memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count);
+		memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+
+		if (byte_left <= 4) {
+			switch (i2c_edid->gmbus.cycle_type) {
+			case NIDX_STOP:
+			case IDX_STOP:
+				i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+				break;
+			case NIDX_NS_W:
+			case IDX_NS_W:
+			default:
+				i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
+				break;
+			}
+			intel_vgpu_init_i2c_edid(vgpu);
+		}
+		/*
+		 * Read GMBUS3 during send operation,
+		 * return the latest written value
+		 */
+	} else {
+		memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+		gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
+				vgpu->id);
+	}
+	return 0;
+}
+
+static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 value = vgpu_vreg(vgpu, offset);
+
+	if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
+		vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
+	memcpy(p_data, (void *)&value, bytes);
+	return 0;
+}
+
+static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 wvalue = *(u32 *)p_data;
+
+	if (wvalue & GMBUS_INUSE)
+		vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
+	/* All other bits are read-only */
+	return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
+ * @vgpu: a vGPU
+ * @offset: reg offset
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * This function is used to emulate gmbus register mmio read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+		return -EINVAL;
+
+	if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+		return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
+	else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+		return gmbus3_mmio_read(vgpu, offset, p_data, bytes);
+
+	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+	return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
+ * @vgpu: a vGPU
+ * @offset: reg offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * This function is used to emulate gmbus register mmio write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+		return -EINVAL;
+
+	if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
+		return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
+	else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
+		return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
+	else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+		return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
+	else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+		return gmbus3_mmio_write(vgpu, offset, p_data, bytes);
+
+	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+	return 0;
+}
+
+enum {
+	AUX_CH_CTL = 0,
+	AUX_CH_DATA1,
+	AUX_CH_DATA2,
+	AUX_CH_DATA3,
+	AUX_CH_DATA4,
+	AUX_CH_DATA5
+};
+
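+/* The per-port AUX channel registers share one layout: the control
+ * register at base + 0x10, followed by five data registers at 4-byte
+ * strides (0x14 - 0x24); decode the low byte of the offset accordingly.
+ */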
+static inline int get_aux_ch_reg(unsigned int offset)
+{
+	int reg;
+
+	switch (offset & 0xff) {
+	case 0x10:
+		reg = AUX_CH_CTL;
+		break;
+	case 0x14:
+		reg = AUX_CH_DATA1;
+		break;
+	case 0x18:
+		reg = AUX_CH_DATA2;
+		break;
+	case 0x1c:
+		reg = AUX_CH_DATA3;
+		break;
+	case 0x20:
+		reg = AUX_CH_DATA4;
+		break;
+	case 0x24:
+		reg = AUX_CH_DATA5;
+		break;
+	default:
+		reg = -1;
+		break;
+	}
+	return reg;
+}
+
+#define AUX_CTL_MSG_LENGTH(reg) \
+	((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
+		DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
+
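+/* Rough layout of the I2C-over-AUX request header kept in AUX_CH_DATA1,
+ * as decoded by the write handler below: bits 31:24 carry the control
+ * byte (whose upper nibble is the op code) and bits 23:8 carry the
+ * slave address.
+ */
+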
+/**
+ * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
+ * @vgpu: a vGPU
+ * @port_idx: port index
+ * @offset: reg offset
+ * @p_data: write data buffer
+ *
+ * This function is used to emulate AUX channel register write
+ *
+ */
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+				int port_idx,
+				unsigned int offset,
+				void *p_data)
+{
+	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+	int msg_length, ret_msg_size;
+	int msg, addr, ctrl, op;
+	u32 value = *(u32 *)p_data;
+	int aux_data_for_write = 0;
+	int reg = get_aux_ch_reg(offset);
+
+	if (reg != AUX_CH_CTL) {
+		vgpu_vreg(vgpu, offset) = value;
+		return;
+	}
+
+	msg_length = AUX_CTL_MSG_LENGTH(value);
+	/* Check the message header in the DATA1 register. */
+	msg = vgpu_vreg(vgpu, offset + 4);
+	addr = (msg >> 8) & 0xffff;
+	ctrl = (msg >> 24) & 0xff;
+	op = ctrl >> 4;
+	if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
+		/* A ctl write without SEND_BUSY only clears some states */
+		return;
+	}
+
+	/* Always set the wanted value for VMs. */
+	ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
+	vgpu_vreg(vgpu, offset) =
+		DP_AUX_CH_CTL_DONE |
+		((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
+		DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);
+
+	if (msg_length == 3) {
+		if (!(op & GVT_AUX_I2C_MOT)) {
+			/* stop */
+			intel_vgpu_init_i2c_edid(vgpu);
+		} else {
+			/* start or restart */
+			i2c_edid->aux_ch.i2c_over_aux_ch = true;
+			i2c_edid->aux_ch.aux_ch_mot = true;
+			if (addr == 0) {
+				/* reset the address */
+				intel_vgpu_init_i2c_edid(vgpu);
+			} else if (addr == EDID_ADDR) {
+				i2c_edid->state = I2C_AUX_CH;
+				i2c_edid->port = port_idx;
+				i2c_edid->slave_selected = true;
+				if (intel_vgpu_has_monitor_on_port(vgpu,
+					port_idx) &&
+					intel_vgpu_port_is_dp(vgpu, port_idx))
+					i2c_edid->edid_available = true;
+			}
+		}
+	} else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
+		/* TODO
+		 * We only support EDID reading via I2C_over_AUX, and
+		 * we do not expect the index mode to be used. The WRITE
+		 * operation is ignored for now; that is good enough for
+		 * the gfx driver's EDID access.
+		 */
+	} else {
+		if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+			return;
+		if (WARN_ON(msg_length != 4))
+			return;
+		if (i2c_edid->edid_available && i2c_edid->slave_selected) {
+			unsigned char val = edid_get_byte(vgpu);
+
+			aux_data_for_write = (val << 16);
+		}
+	}
+	/* Write the return value into the AUX_CH_DATA reg, which includes:
+	 * the ACK of an I2C_WRITE, and
+	 * the returned byte for a READ
+	 */
+
+	aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24;
+	vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
+}
+
+/**
+ * intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize the vGPU i2c edid emulation state
+ *
+ */
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+
+	edid->state = I2C_NOT_SPECIFIED;
+
+	edid->port = -1;
+	edid->slave_selected = false;
+	edid->edid_available = false;
+	edid->current_edid_read = 0;
+
+	memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
+
+	edid->aux_ch.i2c_over_aux_ch = false;
+	edid->aux_ch.aux_ch_mot = false;
+}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
new file mode 100644
index 0000000..de366b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Ke Yu
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Terrence Xu <terrence.xu@intel.com>
+ *    Changbin Du <changbin.du@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EDID_H_
+#define _GVT_EDID_H_
+
+#define EDID_SIZE		128
+#define EDID_ADDR		0x50 /* Linux hvm EDID addr */
+
+#define GVT_AUX_NATIVE_WRITE			0x8
+#define GVT_AUX_NATIVE_READ			0x9
+#define GVT_AUX_I2C_WRITE			0x0
+#define GVT_AUX_I2C_READ			0x1
+#define GVT_AUX_I2C_STATUS			0x2
+#define GVT_AUX_I2C_MOT				0x4
+#define GVT_AUX_I2C_REPLY_ACK			(0x0 << 6)
+
+struct intel_vgpu_edid_data {
+	bool data_valid;
+	unsigned char edid_block[EDID_SIZE];
+};
+
+enum gmbus_cycle_type {
+	GMBUS_NOCYCLE	= 0x0,
+	NIDX_NS_W	= 0x1,
+	IDX_NS_W	= 0x3,
+	GMBUS_STOP	= 0x4,
+	NIDX_STOP	= 0x5,
+	IDX_STOP	= 0x7
+};
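+
+/* These values follow the GMBUS1 bus cycle select encoding: bit0
+ * requests a data cycle, bit1 adds an index phase, and bit2 selects
+ * a stop (rather than a wait) at the end of the cycle.
+ */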
+
+/*
+ * States of GMBUS
+ *
+ * GMBUS0-3 can be involved in the EDID virtualization. Another two GMBUS
+ * registers, GMBUS4 (interrupt mask) and GMBUS5 (2-byte index register), are
+ * not considered here. Below describes the usage of the GMBUS registers that
+ * the EDID virtualization cares about
+ *
+ * GMBUS0:
+ *      R/W
+ *      port selection. value of bit0 - bit2 corresponds to the GPIO registers.
+ *
+ * GMBUS1:
+ *      R/W Protect
+ *      Command and Status.
+ *      bit0 is the direction bit: 1 is read; 0 is write.
+ *      bit1 - bit7 are the 7-bit slave address.
+ *      bit16 - bit24: total byte count (ignore?)
+ *
+ * GMBUS2:
+ *      Most bits are read-only, except bit 15 (IN_USE)
+ *      Status register
+ *      bit0 - bit8 current byte count
+ *      bit 11: hardware ready;
+ *
+ * GMBUS3:
+ *      Read/Write
+ *      Data for transfer
+ */
+
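+/* A typical guest EDID read over GMBUS, which the emulation in edid.c
+ * handles, looks roughly like:
+ *
+ *	GMBUS0 <- pin pair select                   (pick the port)
+ *	GMBUS1 <- cycle | count | 0x50 << 1 | read  (start the transfer)
+ *	poll GMBUS2 until hardware ready            (wait for data)
+ *	data <- GMBUS3                              (up to 4 bytes per read)
+ *	GMBUS1 <- stop cycle                        (terminate)
+ */
+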
+/* Per the hw spec, other phases like START, ADDRESS and INDEX
+ * are invisible to the GMBUS MMIO interface, so they have no
+ * definitions in the enum types below
+ */
+enum gvt_gmbus_phase {
+	GMBUS_IDLE_PHASE = 0,
+	GMBUS_DATA_PHASE,
+	GMBUS_WAIT_PHASE,
+	//GMBUS_STOP_PHASE,
+	GMBUS_MAX_PHASE
+};
+
+struct intel_vgpu_i2c_gmbus {
+	unsigned int total_byte_count; /* from GMBUS1 */
+	enum gmbus_cycle_type cycle_type;
+	enum gvt_gmbus_phase phase;
+};
+
+struct intel_vgpu_i2c_aux_ch {
+	bool i2c_over_aux_ch;
+	bool aux_ch_mot;
+};
+
+enum i2c_state {
+	I2C_NOT_SPECIFIED = 0,
+	I2C_GMBUS = 1,
+	I2C_AUX_CH = 2
+};
+
+/* I2C sequences cannot interleave.
+ * GMBUS and AUX_CH sequences cannot interleave.
+ */
+struct intel_vgpu_i2c_edid {
+	enum i2c_state state;
+
+	unsigned int port;
+	bool slave_selected;
+	bool edid_available;
+	unsigned int current_edid_read;
+
+	struct intel_vgpu_i2c_gmbus gmbus;
+	struct intel_vgpu_i2c_aux_ch aux_ch;
+};
+
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);
+
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes);
+
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes);
+
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+		int port_idx,
+		unsigned int offset,
+		void *p_data);
+
+#endif /*_GVT_EDID_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
new file mode 100644
index 0000000..c1f6019
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define _EL_OFFSET_STATUS       0x234
+#define _EL_OFFSET_STATUS_BUF   0x370
+#define _EL_OFFSET_STATUS_PTR   0x3A0
+
+#define execlist_ring_mmio(gvt, ring_id, offset) \
+	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
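+
+/* e.g. with the render ring's 0x2000 mmio base, the execlist status
+ * register used below lands at 0x2234
+ */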
+
+#define valid_context(ctx) ((ctx)->valid)
+#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+		((a)->lrca == (b)->lrca))
+
+static int context_switch_events[] = {
+	[RCS] = RCS_AS_CONTEXT_SWITCH,
+	[BCS] = BCS_AS_CONTEXT_SWITCH,
+	[VCS] = VCS_AS_CONTEXT_SWITCH,
+	[VCS2] = VCS2_AS_CONTEXT_SWITCH,
+	[VECS] = VECS_AS_CONTEXT_SWITCH,
+};
+
+static int ring_id_to_context_switch_event(int ring_id)
+{
+	if (WARN_ON(ring_id < RCS ||
+				ring_id >= ARRAY_SIZE(context_switch_events)))
+		return -EINVAL;
+
+	return context_switch_events[ring_id];
+}
+
+static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
+{
+	gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
+			execlist->running_slot ?
+			execlist->running_slot->index : -1,
+			execlist->running_context ?
+			execlist->running_context->context_id : 0,
+			execlist->pending_slot ?
+			execlist->pending_slot->index : -1);
+
+	execlist->running_slot = execlist->pending_slot;
+	execlist->pending_slot = NULL;
+	execlist->running_context = execlist->running_context ?
+		&execlist->running_slot->ctx[0] : NULL;
+
+	gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
+			execlist->running_slot ?
+			execlist->running_slot->index : -1,
+			execlist->running_context ?
+			execlist->running_context->context_id : 0,
+			execlist->pending_slot ?
+			execlist->pending_slot->index : -1);
+}
+
+static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
+{
+	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
+	struct intel_vgpu *vgpu = execlist->vgpu;
+	struct execlist_status_format status;
+	int ring_id = execlist->ring_id;
+	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
+			ring_id, _EL_OFFSET_STATUS);
+
+	status.ldw = vgpu_vreg(vgpu, status_reg);
+	status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+	if (running) {
+		status.current_execlist_pointer = !!running->index;
+		status.execlist_write_pointer = !running->index;
+		status.execlist_0_active = status.execlist_0_valid =
+			!(running->index);
+		status.execlist_1_active = status.execlist_1_valid =
+			!!(running->index);
+	} else {
+		status.context_id = 0;
+		status.execlist_0_active = status.execlist_0_valid = 0;
+		status.execlist_1_active = status.execlist_1_valid = 0;
+	}
+
+	status.context_id = desc ? desc->context_id : 0;
+	status.execlist_queue_full = !!(pending);
+
+	vgpu_vreg(vgpu, status_reg) = status.ldw;
+	vgpu_vreg(vgpu, status_reg + 4) = status.udw;
+
+	gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
+		vgpu->id, status_reg, status.ldw, status.udw);
+}
+
+static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
+		struct execlist_context_status_format *status,
+		bool trigger_interrupt_later)
+{
+	struct intel_vgpu *vgpu = execlist->vgpu;
+	int ring_id = execlist->ring_id;
+	struct execlist_context_status_pointer_format ctx_status_ptr;
+	u32 write_pointer;
+	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+
+	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+			_EL_OFFSET_STATUS_PTR);
+	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+			_EL_OFFSET_STATUS_BUF);
+
+	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+
+	write_pointer = ctx_status_ptr.write_ptr;
+
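+	/* The context status buffer holds six entries; 0x7 is the reset
+	 * value of the write pointer, so the first status lands in entry
+	 * 0 and the pointer then wraps modulo six.
+	 */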
+	if (write_pointer == 0x7) {
+		write_pointer = 0;
+	} else {
+		++write_pointer;
+		write_pointer %= 0x6;
+	}
+
+	offset = ctx_status_buf_reg + write_pointer * 8;
+
+	vgpu_vreg(vgpu, offset) = status->ldw;
+	vgpu_vreg(vgpu, offset + 4) = status->udw;
+
+	ctx_status_ptr.write_ptr = write_pointer;
+	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+
+	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
+		vgpu->id, write_pointer, offset, status->ldw, status->udw);
+
+	if (trigger_interrupt_later)
+		return;
+
+	intel_vgpu_trigger_virtual_event(vgpu,
+			ring_id_to_context_switch_event(execlist->ring_id));
+}
+
+static int emulate_execlist_ctx_schedule_out(
+		struct intel_vgpu_execlist *execlist,
+		struct execlist_ctx_descriptor_format *ctx)
+{
+	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
+	struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
+	struct execlist_context_status_format status;
+
+	memset(&status, 0, sizeof(status));
+
+	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
+
+	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
+		gvt_err("schedule out context is not running context,"
+				"ctx id %x running ctx id %x\n",
+				ctx->context_id,
+				execlist->running_context->context_id);
+		return -EINVAL;
+	}
+
+	/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
+	if (valid_context(ctx1) && same_context(ctx0, ctx)) {
+		gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
+
+		execlist->running_context = ctx1;
+
+		emulate_execlist_status(execlist);
+
+		status.context_complete = status.element_switch = 1;
+		status.context_id = ctx->context_id;
+
+		emulate_csb_update(execlist, &status, false);
+		/*
+		 * ctx1 is not valid, ctx == ctx0
+		 * ctx1 is valid, ctx1 == ctx
+		 *	--> last element is finished
+		 * emulate:
+		 *	active-to-idle if there is *no* pending execlist
+		 *	context-complete if there *is* pending execlist
+		 */
+	} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
+			|| (valid_context(ctx1) && same_context(ctx1, ctx))) {
+		gvt_dbg_el("need to switch virtual execlist slot\n");
+
+		switch_virtual_execlist_slot(execlist);
+
+		emulate_execlist_status(execlist);
+
+		status.context_complete = status.active_to_idle = 1;
+		status.context_id = ctx->context_id;
+
+		if (!pending) {
+			emulate_csb_update(execlist, &status, false);
+		} else {
+			emulate_csb_update(execlist, &status, true);
+
+			memset(&status, 0, sizeof(status));
+
+			status.idle_to_active = 1;
+			status.context_id = 0;
+
+			emulate_csb_update(execlist, &status, false);
+		}
+	} else {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
+		struct intel_vgpu_execlist *execlist)
+{
+	struct intel_vgpu *vgpu = execlist->vgpu;
+	int ring_id = execlist->ring_id;
+	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+			_EL_OFFSET_STATUS);
+	struct execlist_status_format status;
+
+	status.ldw = vgpu_vreg(vgpu, status_reg);
+	status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+	if (status.execlist_queue_full) {
+		gvt_err("virtual execlist slots are full\n");
+		return NULL;
+	}
+
+	return &execlist->slot[status.execlist_write_pointer];
+}
+
+static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
+		struct execlist_ctx_descriptor_format ctx[2])
+{
+	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+	struct intel_vgpu_execlist_slot *slot =
+		get_next_execlist_slot(execlist);
+
+	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
+	struct execlist_context_status_format status;
+
+	gvt_dbg_el("emulate schedule-in\n");
+
+	if (!slot) {
+		gvt_err("no available execlist slot\n");
+		return -EINVAL;
+	}
+
+	memset(&status, 0, sizeof(status));
+	memset(slot->ctx, 0, sizeof(slot->ctx));
+
+	slot->ctx[0] = ctx[0];
+	slot->ctx[1] = ctx[1];
+
+	gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
+			slot->index, ctx[0].context_id,
+			ctx[1].context_id);
+
+	/*
+	 * no running execlist: treat this write bundle as the running
+	 * execlist -> idle-to-active
+	 */
+	if (!running) {
+		gvt_dbg_el("no current running execlist\n");
+
+		execlist->running_slot = slot;
+		execlist->pending_slot = NULL;
+		execlist->running_context = &slot->ctx[0];
+
+		gvt_dbg_el("running slot index %d running context %x\n",
+				execlist->running_slot->index,
+				execlist->running_context->context_id);
+
+		emulate_execlist_status(execlist);
+
+		status.idle_to_active = 1;
+		status.context_id = 0;
+
+		emulate_csb_update(execlist, &status, false);
+		return 0;
+	}
+
+	ctx0 = &running->ctx[0];
+	ctx1 = &running->ctx[1];
+
+	gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
+		running->index, ctx0->context_id, ctx1->context_id);
+
+	/*
+	 * already has a running execlist
+	 *	a. running ctx1 is valid,
+	 *	   ctx0 is finished, and running ctx1 == new execlist ctx[0]
+	 *	b. running ctx1 is not valid,
+	 *	   ctx0 == new execlist ctx[0]
+	 * ----> lite-restore + preempted
+	 */
+	if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
+		/* condition a */
+		(!same_context(ctx0, execlist->running_context))) ||
+			(!valid_context(ctx1) &&
+			 same_context(ctx0, &slot->ctx[0]))) { /* condition b */
+		gvt_dbg_el("need to switch virtual execlist slot\n");
+
+		execlist->pending_slot = slot;
+		switch_virtual_execlist_slot(execlist);
+
+		emulate_execlist_status(execlist);
+
+		status.lite_restore = status.preempted = 1;
+		status.context_id = ctx[0].context_id;
+
+		emulate_csb_update(execlist, &status, false);
+	} else {
+		gvt_dbg_el("emulate as pending slot\n");
+		/*
+		 * otherwise
+		 * --> emulate the "pending execlist exists, but no
+		 * preemption" case
+		 */
+		execlist->pending_slot = slot;
+		emulate_execlist_status(execlist);
+	}
+	return 0;
+}
+
+static void free_workload(struct intel_vgpu_workload *workload)
+{
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+	intel_gvt_mm_unreference(workload->shadow_mm);
+	kmem_cache_free(workload->vgpu->workloads, workload);
+}
+
+#define get_desc_from_elsp_dwords(ed, i) \
+	((struct execlist_ctx_descriptor_format *)&((ed)->data[(i) * 2]))
+
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
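+/* Patch the shadow batch buffer's graphics memory address into the
+ * MI_BATCH_BUFFER_START command that refers to it: dword 1 takes the
+ * low address bits and, when the command carries an 8-byte gmadr,
+ * dword 2 takes the high 16 bits.
+ */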
+static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
+			     unsigned long add, int gmadr_bytes)
+{
+	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+		return -1;
+
+	*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
+		BATCH_BUFFER_ADDR_MASK;
+	if (gmadr_bytes == 8) {
+		*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
+			add & BATCH_BUFFER_ADDR_HIGH_MASK;
+	}
+
+	return 0;
+}
+
+static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+	int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+	/* pin the gem object to ggtt */
+	if (!list_empty(&workload->shadow_bb)) {
+		struct intel_shadow_bb_entry *entry_obj =
+			list_first_entry(&workload->shadow_bb,
+					 struct intel_shadow_bb_entry,
+					 list);
+		struct intel_shadow_bb_entry *temp;
+
+		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+				list) {
+			struct i915_vma *vma;
+
+			vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
+						       4, 0);
+			if (IS_ERR(vma)) {
+				gvt_err("Cannot pin\n");
+				return;
+			}
+
+			/* FIXME: we are not tracking our pinned VMA leaving it
+			 * up to the core to fix up the stray pin_count upon
+			 * free.
+			 */
+
+			/* update the relocated gma with the shadow batch
+			 * buffer address
+			 */
+			set_gma_to_bb_cmd(entry_obj,
+					  i915_ggtt_offset(vma),
+					  gmadr_bytes);
+		}
+	}
+}
+
+static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	int ring_id = wa_ctx->workload->ring_id;
+	struct i915_gem_context *shadow_ctx =
+		wa_ctx->workload->vgpu->shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap_atomic(page);
+
+	shadow_ring_context->bb_per_ctx_ptr.val =
+		(shadow_ring_context->bb_per_ctx_ptr.val &
+		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+	shadow_ring_context->rcs_indirect_ctx.val =
+		(shadow_ring_context->rcs_indirect_ctx.val &
+		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+	kunmap_atomic(shadow_ring_context);
+	return 0;
+}
+
+static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	struct i915_vma *vma;
+	unsigned char *per_ctx_va =
+		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+		wa_ctx->indirect_ctx.size;
+
+	if (wa_ctx->indirect_ctx.size == 0)
+		return;
+
+	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+				       0, CACHELINE_BYTES, 0);
+	if (IS_ERR(vma)) {
+		gvt_err("Cannot pin indirect ctx obj\n");
+		return;
+	}
+
+	/* FIXME: we are not tracking our pinned VMA leaving it
+	 * up to the core to fix up the stray pin_count upon
+	 * free.
+	 */
+
+	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+	memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+	update_wa_ctx_2_shadow_ctx(wa_ctx);
+}
+
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct execlist_ctx_descriptor_format ctx[2];
+	int ring_id = workload->ring_id;
+
+	intel_vgpu_pin_mm(workload->shadow_mm);
+	intel_vgpu_sync_oos_pages(workload->vgpu);
+	intel_vgpu_flush_post_shadow(workload->vgpu);
+	prepare_shadow_batch_buffer(workload);
+	prepare_shadow_wa_ctx(&workload->wa_ctx);
+	if (!workload->emulate_schedule_in)
+		return 0;
+
+	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+	return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+	/* release all the shadow batch buffers */
+	if (!list_empty(&workload->shadow_bb)) {
+		struct intel_shadow_bb_entry *entry_obj =
+			list_first_entry(&workload->shadow_bb,
+					 struct intel_shadow_bb_entry,
+					 list);
+		struct intel_shadow_bb_entry *temp;
+
+		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+					 list) {
+			i915_gem_object_unpin_map(entry_obj->obj);
+			i915_gem_object_put(entry_obj->obj);
+			list_del(&entry_obj->list);
+			kfree(entry_obj);
+		}
+	}
+}
+
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	if (wa_ctx->indirect_ctx.size == 0)
+		return;
+
+	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
+static int complete_execlist_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_execlist *execlist =
+		&vgpu->execlist[workload->ring_id];
+	struct intel_vgpu_workload *next_workload;
+	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	bool lite_restore = false;
+	int ret;
+
+	gvt_dbg_el("complete workload %p status %d\n", workload,
+			workload->status);
+
+	release_shadow_batch_buffer(workload);
+	release_shadow_wa_ctx(&workload->wa_ctx);
+
+	if (workload->status || vgpu->resetting)
+		goto out;
+
+	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
+
+		next_workload = container_of(next,
+				struct intel_vgpu_workload, list);
+		this_desc = &workload->ctx_desc;
+		next_desc = &next_workload->ctx_desc;
+
+		lite_restore = same_context(this_desc, next_desc);
+	}
+
+	if (lite_restore) {
+		gvt_dbg_el("next context == current - no schedule-out\n");
+		free_workload(workload);
+		return 0;
+	}
+
+	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
+	if (ret)
+		goto err;
+out:
+	free_workload(workload);
+	return 0;
+err:
+	free_workload(workload);
+	return ret;
+}
+
+#define RING_CTX_OFF(x) \
+	offsetof(struct execlist_ring_context, x)
+
+static void read_guest_pdps(struct intel_vgpu *vgpu,
+		u64 ring_context_gpa, u32 pdp[8])
+{
+	u64 gpa;
+	int i;
+
+	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
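+	/* The ring context stores the PDPs from pdp3_UDW down to pdp0_LDW,
+	 * one value per 8-byte mmio pair; walk them in that order and
+	 * reverse into pdp[], so that pdp[0]/pdp[1] end up as PDP0 LDW/UDW.
+	 */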
+	for (i = 0; i < 8; i++)
+		intel_gvt_hypervisor_read_gpa(vgpu,
+				gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static int prepare_mm(struct intel_vgpu_workload *workload)
+{
+	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+	struct intel_vgpu_mm *mm;
+	int page_table_level;
+	u32 pdp[8];
+
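+	/* addressing_mode comes from bits 4:3 of the context descriptor:
+	 * 01b selects legacy 32-bit (3-level page tables), 11b selects
+	 * legacy 64-bit (4-level); anything else is the unsupported
+	 * advanced (SVM) mode.
+	 */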
+	if (desc->addressing_mode == 1) { /* legacy 32-bit */
+		page_table_level = 3;
+	} else if (desc->addressing_mode == 3) { /* legacy 64-bit */
+		page_table_level = 4;
+	} else {
+		gvt_err("Advanced Context mode(SVM) is not supported!\n");
+		return -EINVAL;
+	}
+
+	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+
+	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+	if (mm) {
+		intel_gvt_mm_reference(mm);
+	} else {
+
+		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
+				pdp, page_table_level, 0);
+		if (IS_ERR(mm)) {
+			gvt_err("fail to create mm object.\n");
+			return PTR_ERR(mm);
+		}
+	}
+	workload->shadow_mm = mm;
+	return 0;
+}
+
+#define get_last_workload(q) \
+	(list_empty(q) ? NULL : container_of(q->prev, \
+	struct intel_vgpu_workload, list))
+
+static int submit_context(struct intel_vgpu *vgpu, int ring_id,
+		struct execlist_ctx_descriptor_format *desc,
+		bool emulate_schedule_in)
+{
+	struct list_head *q = workload_q_head(vgpu, ring_id);
+	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_workload *workload = NULL;
+	u64 ring_context_gpa;
+	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+	int ret;
+
+	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
+	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+		return -EINVAL;
+	}
+
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(ring_header.val), &head, 4);
+
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+	head &= RB_HEAD_OFF_MASK;
+	tail &= RB_TAIL_OFF_MASK;
+
+	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+		gvt_dbg_el("ctx head %x real head %lx\n", head,
+				last_workload->rb_tail);
+		/*
+		 * cannot use guest context head pointer here,
+		 * as it might not be updated at this time
+		 */
+		head = last_workload->rb_tail;
+	}
+
+	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
+	if (!workload)
+		return -ENOMEM;
+
+	/* record some ring buffer register values for scan and shadow */
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(rb_start.val), &start, 4);
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+	INIT_LIST_HEAD(&workload->list);
+	INIT_LIST_HEAD(&workload->shadow_bb);
+
+	init_waitqueue_head(&workload->shadow_ctx_status_wq);
+	atomic_set(&workload->shadow_ctx_active, 0);
+
+	workload->vgpu = vgpu;
+	workload->ring_id = ring_id;
+	workload->ctx_desc = *desc;
+	workload->ring_context_gpa = ring_context_gpa;
+	workload->rb_head = head;
+	workload->rb_tail = tail;
+	workload->rb_start = start;
+	workload->rb_ctl = ctl;
+	workload->prepare = prepare_execlist_workload;
+	workload->complete = complete_execlist_workload;
+	workload->status = -EINPROGRESS;
+	workload->emulate_schedule_in = emulate_schedule_in;
+
+	if (ring_id == RCS) {
+		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+		workload->wa_ctx.indirect_ctx.guest_gma =
+			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+		workload->wa_ctx.indirect_ctx.size =
+			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+			CACHELINE_BYTES;
+		workload->wa_ctx.per_ctx.guest_gma =
+			per_ctx & PER_CTX_ADDR_MASK;
+		workload->wa_ctx.workload = workload;
+
+		WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
+	}
+
+	if (emulate_schedule_in)
+		memcpy(&workload->elsp_dwords,
+				&vgpu->execlist[ring_id].elsp_dwords,
+				sizeof(workload->elsp_dwords));
+
+	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+			workload, ring_id, head, tail, start, ctl);
+
+	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
+			emulate_schedule_in);
+
+	ret = prepare_mm(workload);
+	if (ret) {
+		kmem_cache_free(vgpu->workloads, workload);
+		return ret;
+	}
+
+	queue_workload(workload);
+	return 0;
+}
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
+	unsigned long valid_desc_bitmap = 0;
+	bool emulate_schedule_in = true;
+	int ret;
+	int i;
+
+	memset(valid_desc, 0, sizeof(valid_desc));
+
+	desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
+	desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+
+	for (i = 0; i < 2; i++) {
+		if (!desc[i]->valid)
+			continue;
+
+		if (!desc[i]->privilege_access) {
+			gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
+					vgpu->id);
+			return -EINVAL;
+		}
+
+		/* TODO: add more guest context checks here. */
+		set_bit(i, &valid_desc_bitmap);
+		valid_desc[i] = *desc[i];
+	}
+
+	if (!valid_desc_bitmap) {
+		gvt_err("vgpu%d: no valid desc in a elsp submission\n",
+				vgpu->id);
+		return -EINVAL;
+	}
+
+	if (!test_bit(0, (void *)&valid_desc_bitmap) &&
+			test_bit(1, (void *)&valid_desc_bitmap)) {
+		gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
+				vgpu->id);
+		return -EINVAL;
+	}
+
+	/* submit workload */
+	for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
+		ret = submit_context(vgpu, ring_id, &valid_desc[i],
+				emulate_schedule_in);
+		if (ret) {
+			gvt_err("vgpu%d: fail to schedule workload\n",
+					vgpu->id);
+			return ret;
+		}
+		emulate_schedule_in = false;
+	}
+	return 0;
+}
+
+static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct execlist_context_status_pointer_format ctx_status_ptr;
+	u32 ctx_status_ptr_reg;
+
+	memset(execlist, 0, sizeof(*execlist));
+
+	execlist->vgpu = vgpu;
+	execlist->ring_id = ring_id;
+	execlist->slot[0].index = 0;
+	execlist->slot[1].index = 1;
+
+	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+			_EL_OFFSET_STATUS_PTR);
+
+	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+	ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+}
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
+{
+	kmem_cache_destroy(vgpu->workloads);
+}
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
+{
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
+
+	/* each ring has a virtual execlist engine */
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+		init_vgpu_execlist(vgpu, i);
+		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+	}
+
+	vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+			sizeof(struct intel_vgpu_workload), 0,
+			SLAB_HWCACHE_ALIGN,
+			NULL);
+
+	if (!vgpu->workloads)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+		unsigned long ring_bitmap)
+{
+	int bit;
+	struct list_head *pos, *n;
+	struct intel_vgpu_workload *workload = NULL;
+
+	for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
+		if (bit >= I915_NUM_ENGINES)
+			break;
+		/* free the unsubmitted workloads in the queue */
+		list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
+			workload = container_of(pos,
+					struct intel_vgpu_workload, list);
+			list_del_init(&workload->list);
+			free_workload(workload);
+		}
+
+		init_vgpu_execlist(vgpu, bit);
+	}
+}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
new file mode 100644
index 0000000..635f31c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EXECLIST_H_
+#define _GVT_EXECLIST_H_
+
+struct execlist_ctx_descriptor_format {
+	union {
+		u32 udw;
+		u32 context_id;
+	};
+	union {
+		u32 ldw;
+		struct {
+			u32 valid                  : 1;
+			u32 force_pd_restore       : 1;
+			u32 force_restore          : 1;
+			u32 addressing_mode        : 2;
+			u32 llc_coherency          : 1;
+			u32 fault_handling         : 2;
+			u32 privilege_access       : 1;
+			u32 reserved               : 3;
+			u32 lrca                   : 20;
+		};
+	};
+};
+
+struct execlist_status_format {
+	union {
+		u32 ldw;
+		struct {
+			u32 current_execlist_pointer       :1;
+			u32 execlist_write_pointer         :1;
+			u32 execlist_queue_full            :1;
+			u32 execlist_1_valid               :1;
+			u32 execlist_0_valid               :1;
+			u32 last_ctx_switch_reason         :9;
+			u32 current_active_elm_status      :2;
+			u32 arbitration_enable             :1;
+			u32 execlist_1_active              :1;
+			u32 execlist_0_active              :1;
+			u32 reserved                       :13;
+		};
+	};
+	union {
+		u32 udw;
+		u32 context_id;
+	};
+};
+
+struct execlist_context_status_pointer_format {
+	union {
+		u32 dw;
+		struct {
+			u32 write_ptr              :3;
+			u32 reserved               :5;
+			u32 read_ptr               :3;
+			u32 reserved2              :5;
+			u32 mask                   :16;
+		};
+	};
+};
+
+struct execlist_context_status_format {
+	union {
+		u32 ldw;
+		struct {
+			u32 idle_to_active         :1;
+			u32 preempted              :1;
+			u32 element_switch         :1;
+			u32 active_to_idle         :1;
+			u32 context_complete       :1;
+			u32 wait_on_sync_flip      :1;
+			u32 wait_on_vblank         :1;
+			u32 wait_on_semaphore      :1;
+			u32 wait_on_scanline       :1;
+			u32 reserved               :2;
+			u32 semaphore_wait_mode    :1;
+			u32 display_plane          :3;
+			u32 lite_restore           :1;
+			u32 reserved_2             :16;
+		};
+	};
+	union {
+		u32 udw;
+		u32 context_id;
+	};
+};
+
+struct execlist_mmio_pair {
+	u32 addr;
+	u32 val;
+};
+
+/* The first 52 dwords in register state context */
+struct execlist_ring_context {
+	u32 nop1;
+	u32 lri_cmd_1;
+	struct execlist_mmio_pair ctx_ctrl;
+	struct execlist_mmio_pair ring_header;
+	struct execlist_mmio_pair ring_tail;
+	struct execlist_mmio_pair rb_start;
+	struct execlist_mmio_pair rb_ctrl;
+	struct execlist_mmio_pair bb_cur_head_UDW;
+	struct execlist_mmio_pair bb_cur_head_LDW;
+	struct execlist_mmio_pair bb_state;
+	struct execlist_mmio_pair second_bb_addr_UDW;
+	struct execlist_mmio_pair second_bb_addr_LDW;
+	struct execlist_mmio_pair second_bb_state;
+	struct execlist_mmio_pair bb_per_ctx_ptr;
+	struct execlist_mmio_pair rcs_indirect_ctx;
+	struct execlist_mmio_pair rcs_indirect_ctx_offset;
+	u32 nop2;
+	u32 nop3;
+	u32 nop4;
+	u32 lri_cmd_2;
+	struct execlist_mmio_pair ctx_timestamp;
+	struct execlist_mmio_pair pdp3_UDW;
+	struct execlist_mmio_pair pdp3_LDW;
+	struct execlist_mmio_pair pdp2_UDW;
+	struct execlist_mmio_pair pdp2_LDW;
+	struct execlist_mmio_pair pdp1_UDW;
+	struct execlist_mmio_pair pdp1_LDW;
+	struct execlist_mmio_pair pdp0_UDW;
+	struct execlist_mmio_pair pdp0_LDW;
+};
+
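+/* An ELSP submission arrives as four dword writes: two 64-bit context
+ * descriptors, with the descriptor for element 1 written first and the
+ * one for element 0 written last.
+ */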
+struct intel_vgpu_elsp_dwords {
+	u32 data[4];
+	u32 index;
+};
+
+struct intel_vgpu_execlist_slot {
+	struct execlist_ctx_descriptor_format ctx[2];
+	u32 index;
+};
+
+struct intel_vgpu_execlist {
+	struct intel_vgpu_execlist_slot slot[2];
+	struct intel_vgpu_execlist_slot *running_slot;
+	struct intel_vgpu_execlist_slot *pending_slot;
+	struct execlist_ctx_descriptor_format *running_context;
+	int ring_id;
+	struct intel_vgpu *vgpu;
+	struct intel_vgpu_elsp_dwords elsp_dwords;
+};
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+		unsigned long ring_bitmap);
+
+#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
new file mode 100644
index 0000000..2fae2a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ *    Changbin Du <changbin.du@intel.com>
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/crc32.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+#define FIRMWARE_VERSION (0x0)
+
+struct gvt_firmware_header {
+	u64 magic;
+	u32 crc32;		/* protect the data after this field */
+	u32 version;
+	u64 cfg_space_size;
+	u64 cfg_space_offset;	/* offset in the file */
+	u64 mmio_size;
+	u64 mmio_offset;	/* offset in the file */
+	unsigned char data[1];
+};
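+
+/* Layout of the firmware image built in expose_firmware_sysfs(): the
+ * header is followed by the saved PCI config space at cfg_space_offset
+ * and the MMIO snapshot at mmio_offset:
+ *
+ *	+---------------------+ 0
+ *	| gvt_firmware_header |
+ *	+---------------------+ cfg_space_offset
+ *	| PCI config space    |
+ *	+---------------------+ mmio_offset
+ *	| MMIO snapshot       |
+ *	+---------------------+
+ */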
+
+#define RD(offset) (readl(mmio + offset.reg))
+#define WR(v, offset) (writel(v, mmio + offset.reg))
+
+static void bdw_forcewake_get(void __iomem *mmio)
+{
+	WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
+
+	RD(ECOBUS);
+
+	if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
+		gvt_err("fail to wait forcewake idle\n");
+
+	WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
+
+	if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
+		gvt_err("fail to wait forcewake ack\n");
+
+	if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
+		      GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
+		gvt_err("fail to wait c0 wake up\n");
+}
+
+#undef RD
+#undef WR
+
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
+static ssize_t
+gvt_firmware_read(struct file *filp, struct kobject *kobj,
+	     struct bin_attribute *attr, char *buf,
+	     loff_t offset, size_t count)
+{
+	memcpy(buf, attr->private + offset, count);
+	return count;
+}
+
+static struct bin_attribute firmware_attr = {
+	.attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
+	.read = gvt_firmware_read,
+	.write = NULL,
+	.mmap = NULL,
+};
+
+static int expose_firmware_sysfs(struct intel_gvt *gvt,
+					void __iomem *mmio)
+{
+	struct intel_gvt_device_info *info = &gvt->device_info;
+	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+	struct intel_gvt_mmio_info *e;
+	struct gvt_firmware_header *h;
+	void *firmware;
+	void *p;
+	unsigned long size;
+	int i;
+	int ret;
+
+	size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+	firmware = vmalloc(size);
+	if (!firmware)
+		return -ENOMEM;
+
+	h = firmware;
+
+	h->magic = VGT_MAGIC;
+	h->version = FIRMWARE_VERSION;
+	h->cfg_space_size = info->cfg_space_size;
+	h->cfg_space_offset = offsetof(struct gvt_firmware_header, data);
+	h->mmio_size = info->mmio_size;
+	h->mmio_offset = h->cfg_space_offset + h->cfg_space_size;
+
+	p = firmware + h->cfg_space_offset;
+
+	for (i = 0; i < h->cfg_space_size; i += 4)
+		pci_read_config_dword(pdev, i, p + i);
+
+	memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
+
+	p = firmware + h->mmio_offset;
+
+	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+		int j;
+
+		for (j = 0; j < e->length; j += 4)
+			*(u32 *)(p + e->offset + j) =
+				readl(mmio + e->offset + j);
+	}
+
+	memcpy(gvt->firmware.mmio, p, info->mmio_size);
+
+	firmware_attr.size = size;
+	firmware_attr.private = firmware;
+
+	ret = device_create_bin_file(&pdev->dev, &firmware_attr);
+	if (ret) {
+		vfree(firmware);
+		return ret;
+	}
+	return 0;
+}
+
+static void clean_firmware_sysfs(struct intel_gvt *gvt)
+{
+	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+	device_remove_bin_file(&pdev->dev, &firmware_attr);
+	vfree(firmware_attr.private);
+}
+
+/**
+ * intel_gvt_free_firmware - free GVT firmware
+ * @gvt: intel gvt device
+ *
+ */
+void intel_gvt_free_firmware(struct intel_gvt *gvt)
+{
+	if (!gvt->firmware.firmware_loaded)
+		clean_firmware_sysfs(gvt);
+
+	kfree(gvt->firmware.cfg_space);
+	kfree(gvt->firmware.mmio);
+}
+
+static int verify_firmware(struct intel_gvt *gvt,
+			   const struct firmware *fw)
+{
+	struct intel_gvt_device_info *info = &gvt->device_info;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct gvt_firmware_header *h;
+	unsigned long id, crc32_start;
+	const void *mem;
+	const char *item;
+	u64 file, request;
+
+	h = (struct gvt_firmware_header *)fw->data;
+
+	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+	mem = fw->data + crc32_start;
+
+#define VERIFY(s, a, b) do { \
+	item = (s); file = (u64)(a); request = (u64)(b); \
+	if ((a) != (b)) \
+		goto invalid_firmware; \
+} while (0)
+
+	VERIFY("magic number", h->magic, VGT_MAGIC);
+	VERIFY("version", h->version, FIRMWARE_VERSION);
+	VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start));
+	VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size);
+	VERIFY("mmio size", h->mmio_size, info->mmio_size);
+
+	mem = (fw->data + h->cfg_space_offset);
+
+	id = *(u16 *)(mem + PCI_VENDOR_ID);
+	VERIFY("vender id", id, pdev->vendor);
+
+	id = *(u16 *)(mem + PCI_DEVICE_ID);
+	VERIFY("device id", id, pdev->device);
+
+	id = *(u8 *)(mem + PCI_REVISION_ID);
+	VERIFY("revision id", id, pdev->revision);
+
+#undef VERIFY
+	return 0;
+
+invalid_firmware:
+	gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n",
+		     item, file, request);
+	return -EINVAL;
+}
+
+#define GVT_FIRMWARE_PATH "i915/gvt"
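+
+/* The requested file name is built from the real device's IDs; with a
+ * hypothetical 8086:1912 rev 06 device it would be, for example:
+ *
+ *	i915/gvt/vid_0x8086_did_0x1912_rid_0x0006.golden_hw_state
+ */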
+
+/**
+ * intel_gvt_load_firmware - load GVT firmware
+ * @gvt: intel gvt device
+ *
+ */
+int intel_gvt_load_firmware(struct intel_gvt *gvt)
+{
+	struct intel_gvt_device_info *info = &gvt->device_info;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct intel_gvt_firmware *firmware = &gvt->firmware;
+	struct gvt_firmware_header *h;
+	const struct firmware *fw;
+	char *path;
+	void __iomem *mmio;
+	void *mem;
+	int ret;
+
+	path = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!path)
+		return -ENOMEM;
+
+	mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
+	if (!mem) {
+		kfree(path);
+		return -ENOMEM;
+	}
+
+	firmware->cfg_space = mem;
+
+	mem = kmalloc(info->mmio_size, GFP_KERNEL);
+	if (!mem) {
+		kfree(path);
+		kfree(firmware->cfg_space);
+		return -ENOMEM;
+	}
+
+	firmware->mmio = mem;
+
+	mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
+	if (!mmio) {
+		kfree(path);
+		kfree(firmware->cfg_space);
+		kfree(firmware->mmio);
+		return -EINVAL;
+	}
+
+	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
+		bdw_forcewake_get(mmio);
+
+	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+		 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
+		 pdev->revision);
+
+	gvt_dbg_core("request hw state firmware %s...\n", path);
+
+	ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+	kfree(path);
+
+	if (ret)
+		goto expose_firmware;
+
+	gvt_dbg_core("success.\n");
+
+	ret = verify_firmware(gvt, fw);
+	if (ret)
+		goto out_free_fw;
+
+	gvt_dbg_core("verified.\n");
+
+	h = (struct gvt_firmware_header *)fw->data;
+
+	memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset,
+	       h->cfg_space_size);
+	memcpy(firmware->mmio, fw->data + h->mmio_offset,
+	       h->mmio_size);
+
+	release_firmware(fw);
+	firmware->firmware_loaded = true;
+	pci_iounmap(pdev, mmio);
+	return 0;
+
+out_free_fw:
+	release_firmware(fw);
+expose_firmware:
+	expose_firmware_sysfs(gvt, mmio);
+	pci_iounmap(pdev, mmio);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
new file mode 100644
index 0000000..15f7d4e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -0,0 +1,2261 @@
+/*
+ * GTT virtualization
+ *
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *    Zhenyu Wang <zhenyuw@linux.intel.com>
+ *    Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+static bool enable_out_of_sync;
+static int preallocated_oos_pages = 8192;
+
+/*
+ * validate a gm address and its range size
+ */
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
+{
+	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
+			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
+		gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
+				vgpu->id, addr, size);
+		return false;
+	}
+	return true;
+}
+
+/* translate a guest gmadr to host gmadr */
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
+{
+	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
+		 "invalid guest gmadr %llx\n", g_addr))
+		return -EACCES;
+
+	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
+		*h_addr = vgpu_aperture_gmadr_base(vgpu)
+			  + (g_addr - vgpu_aperture_offset(vgpu));
+	else
+		*h_addr = vgpu_hidden_gmadr_base(vgpu)
+			  + (g_addr - vgpu_hidden_offset(vgpu));
+	return 0;
+}
+
+/* translate a host gmadr to guest gmadr */
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
+{
+	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+		 "invalid host gmadr %llx\n", h_addr))
+		return -EACCES;
+
+	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
+		*g_addr = vgpu_aperture_gmadr_base(vgpu)
+			+ (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
+	else
+		*g_addr = vgpu_hidden_gmadr_base(vgpu)
+			+ (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
+	return 0;
+}
+
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+			     unsigned long *h_index)
+{
+	u64 h_addr;
+	int ret;
+
+	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+				       &h_addr);
+	if (ret)
+		return ret;
+
+	*h_index = h_addr >> GTT_PAGE_SHIFT;
+	return 0;
+}
+
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+			     unsigned long *g_index)
+{
+	u64 g_addr;
+	int ret;
+
+	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+				       &g_addr);
+	if (ret)
+		return ret;
+
+	*g_index = g_addr >> GTT_PAGE_SHIFT;
+	return 0;
+}
+
+#define gtt_type_is_entry(type) \
+	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
+	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
+	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_type_is_pt(type) \
+	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
+
+#define gtt_type_is_pte_pt(type) \
+	(type == GTT_TYPE_PPGTT_PTE_PT)
+
+#define gtt_type_is_root_pointer(type) \
+	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_init_entry(e, t, p, v) do { \
+	(e)->type = t; \
+	(e)->pdev = p; \
+	memcpy(&(e)->val64, &v, sizeof(v)); \
+} while (0)
+
+/*
+ * Mappings between GTT_TYPE* enumerations.
+ * The following information can be found for a given type:
+ * - the type of the next level page table
+ * - the type of an entry inside this level's page table
+ * - the type of an entry with the PSE bit set
+ *
+ * If the given type doesn't carry a piece of information,
+ * GTT_TYPE_INVALID is returned: e.g. an l4 root entry has no PSE bit,
+ * so requesting its PSE type returns GTT_TYPE_INVALID, and a PTE page
+ * table has no next level, so requesting its next level page table
+ * type also returns GTT_TYPE_INVALID. This is useful when traversing
+ * a page table.
+ */
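+
+/* For example, looking up GTT_TYPE_PPGTT_PDE_PT in the table below
+ * yields GTT_TYPE_PPGTT_PDE_ENTRY as its entry type, GTT_TYPE_PPGTT_PTE_PT
+ * as the next level page table and GTT_TYPE_PPGTT_PTE_2M_ENTRY as its
+ * PSE entry type.
+ */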
+
+struct gtt_type_table_entry {
+	int entry_type;
+	int next_pt_type;
+	int pse_entry_type;
+};
+
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+	[type] = { \
+		.entry_type = e_type, \
+		.next_pt_type = npt_type, \
+		.pse_entry_type = pse_type, \
+	}
+
+static struct gtt_type_table_entry gtt_type_table[] = {
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+			GTT_TYPE_PPGTT_PML4_PT,
+			GTT_TYPE_INVALID),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
+			GTT_TYPE_PPGTT_PML4_ENTRY,
+			GTT_TYPE_PPGTT_PDP_PT,
+			GTT_TYPE_INVALID),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
+			GTT_TYPE_PPGTT_PML4_ENTRY,
+			GTT_TYPE_PPGTT_PDP_PT,
+			GTT_TYPE_INVALID),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
+			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
+			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
+			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
+			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
+			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PTE_PT,
+			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PTE_PT,
+			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
+			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+			GTT_TYPE_INVALID,
+			GTT_TYPE_INVALID),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+			GTT_TYPE_INVALID,
+			GTT_TYPE_INVALID),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_INVALID,
+			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_INVALID,
+			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
+			GTT_TYPE_GGTT_PTE,
+			GTT_TYPE_INVALID,
+			GTT_TYPE_INVALID),
+};
+
+static inline int get_next_pt_type(int type)
+{
+	return gtt_type_table[type].next_pt_type;
+}
+
+static inline int get_entry_type(int type)
+{
+	return gtt_type_table[type].entry_type;
+}
+
+static inline int get_pse_type(int type)
+{
+	return gtt_type_table[type].pse_entry_type;
+}
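+
+/*
+ * Example walk through the table above: starting from a 4-level root,
+ * get_next_pt_type(GTT_TYPE_PPGTT_ROOT_L4_ENTRY) yields
+ * GTT_TYPE_PPGTT_PML4_PT, whose entries (get_entry_type()) are of type
+ * GTT_TYPE_PPGTT_PML4_ENTRY, and so on down to GTT_TYPE_PPGTT_PTE_PT,
+ * whose next_pt_type is GTT_TYPE_INVALID - the signal to stop walking.
+ * A PDP entry with PSE set maps a 1G page, a PDE entry with PSE set a
+ * 2M page (get_pse_type()).
+ */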
+
+static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+{
+	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+	u64 pte;
+
+#ifdef readq
+	pte = readq(addr);
+#else
+	pte = ioread32(addr);
+	pte |= (u64)ioread32(addr + 4) << 32;
+#endif
+	return pte;
+}
+
+static void write_pte64(struct drm_i915_private *dev_priv,
+		unsigned long index, u64 pte)
+{
+	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+
+#ifdef writeq
+	writeq(pte, addr);
+#else
+	iowrite32((u32)pte, addr);
+	iowrite32(pte >> 32, addr + 4);
+#endif
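+	/*
+	 * Kick the GFX flush control register so the PTE write reaches
+	 * the GTT before it is used; i915 performs the same flush after
+	 * its own GGTT updates.
+	 */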
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+		struct intel_gvt_gtt_entry *e,
+		unsigned long index, bool hypervisor_access, unsigned long gpa,
+		struct intel_vgpu *vgpu)
+{
+	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+	int ret;
+
+	if (WARN_ON(info->gtt_entry_size != 8))
+		return e;
+
+	if (hypervisor_access) {
+		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+				(index << info->gtt_entry_size_shift),
+				&e->val64, 8);
+		WARN_ON(ret);
+	} else if (!pt) {
+		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+	} else {
+		e->val64 = *((u64 *)pt + index);
+	}
+	return e;
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+		struct intel_gvt_gtt_entry *e,
+		unsigned long index, bool hypervisor_access, unsigned long gpa,
+		struct intel_vgpu *vgpu)
+{
+	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+	int ret;
+
+	if (WARN_ON(info->gtt_entry_size != 8))
+		return e;
+
+	if (hypervisor_access) {
+		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+				(index << info->gtt_entry_size_shift),
+				&e->val64, 8);
+		WARN_ON(ret);
+	} else if (!pt) {
+		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+	} else {
+		*((u64 *)pt + index) = e->val64;
+	}
+	return e;
+}
+
+#define GTT_HAW 46
+
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
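+
+/*
+ * Each mask selects the address bits of a PTE for the given page size:
+ * ADDR_4K_MASK covers bits [GTT_HAW:12], ADDR_2M_MASK bits [GTT_HAW:21]
+ * and ADDR_1G_MASK bits [GTT_HAW:30]. get_pfn() below always shifts the
+ * masked value down by 12, so a "pfn" is uniformly in 4K units
+ * regardless of the entry's page size.
+ */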
+
+static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
+{
+	unsigned long pfn;
+
+	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
+		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
+	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
+		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
+	else
+		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
+	return pfn;
+}
+
+static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
+{
+	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+		e->val64 &= ~ADDR_1G_MASK;
+		pfn &= (ADDR_1G_MASK >> 12);
+	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
+		e->val64 &= ~ADDR_2M_MASK;
+		pfn &= (ADDR_2M_MASK >> 12);
+	} else {
+		e->val64 &= ~ADDR_4K_MASK;
+		pfn &= (ADDR_4K_MASK >> 12);
+	}
+
+	e->val64 |= (pfn << 12);
+}
+
+static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
+{
+	/* Entry doesn't have PSE bit. */
+	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
+		return false;
+
+	e->type = get_entry_type(e->type);
+	if (!(e->val64 & (1 << 7)))
+		return false;
+
+	e->type = get_pse_type(e->type);
+	return true;
+}
+
+static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
+{
+	/*
+	 * i915 writes PDP root pointer registers without the present
+	 * bit set, and that still works, so root pointer entries need
+	 * to be treated specially.
+	 */
+	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
+		return (e->val64 != 0);
+	else
+		return (e->val64 & (1 << 0));
+}
+
+static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
+{
+	e->val64 &= ~(1 << 0);
+}
+
+/*
+ * Per-platform GMA routines.
+ */
+static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
+{
+	unsigned long x = (gma >> GTT_PAGE_SHIFT);
+
+	trace_gma_index(__func__, gma, x);
+	return x;
+}
+
+#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
+static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
+{ \
+	unsigned long x = (exp); \
+	trace_gma_index(__func__, gma, x); \
+	return x; \
+}
+
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
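+
+/*
+ * Worked example of the gen8 4-level decode: for
+ * gma = (1UL << 39) | (2UL << 30) | (3UL << 21) | (4UL << 12)
+ *     = 0x8080604000,
+ * gen8_gma_to_pml4_index(gma)   = 1,
+ * gen8_gma_to_l4_pdp_index(gma) = 2,
+ * gen8_gma_to_pde_index(gma)    = 3,
+ * gen8_gma_to_pte_index(gma)    = 4,
+ * matching the walk order used by intel_vgpu_gma_to_gpa() below.
+ */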
+
+static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
+	.get_entry = gtt_get_entry64,
+	.set_entry = gtt_set_entry64,
+	.clear_present = gtt_entry_clear_present,
+	.test_present = gen8_gtt_test_present,
+	.test_pse = gen8_gtt_test_pse,
+	.get_pfn = gen8_gtt_get_pfn,
+	.set_pfn = gen8_gtt_set_pfn,
+};
+
+static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
+	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
+	.gma_to_pte_index = gen8_gma_to_pte_index,
+	.gma_to_pde_index = gen8_gma_to_pde_index,
+	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
+	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
+	.gma_to_pml4_index = gen8_gma_to_pml4_index,
+};
+
+static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
+		struct intel_gvt_gtt_entry *m)
+{
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	unsigned long gfn, mfn;
+
+	*m = *p;
+
+	if (!ops->test_present(p))
+		return 0;
+
+	gfn = ops->get_pfn(p);
+
+	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
+	if (mfn == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+		return -ENXIO;
+	}
+
+	ops->set_pfn(m, mfn);
+	return 0;
+}
+
+/*
+ * MM helpers.
+ */
+struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+		void *page_table, struct intel_gvt_gtt_entry *e,
+		unsigned long index)
+{
+	struct intel_gvt *gvt = mm->vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+	e->type = mm->page_table_entry_type;
+
+	ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+	ops->test_pse(e);
+	return e;
+}
+
+struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+		void *page_table, struct intel_gvt_gtt_entry *e,
+		unsigned long index)
+{
+	struct intel_gvt *gvt = mm->vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+	return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+}
+
+/*
+ * PPGTT shadow page table helpers.
+ */
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+		struct intel_vgpu_ppgtt_spt *spt,
+		void *page_table, int type,
+		struct intel_gvt_gtt_entry *e, unsigned long index,
+		bool guest)
+{
+	struct intel_gvt *gvt = spt->vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+	e->type = get_entry_type(type);
+
+	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+		return e;
+
+	ops->get_entry(page_table, e, index, guest,
+			spt->guest_page.gfn << GTT_PAGE_SHIFT,
+			spt->vgpu);
+	ops->test_pse(e);
+	return e;
+}
+
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+		struct intel_vgpu_ppgtt_spt *spt,
+		void *page_table, int type,
+		struct intel_gvt_gtt_entry *e, unsigned long index,
+		bool guest)
+{
+	struct intel_gvt *gvt = spt->vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+		return e;
+
+	return ops->set_entry(page_table, e, index, guest,
+			spt->guest_page.gfn << GTT_PAGE_SHIFT,
+			spt->vgpu);
+}
+
+#define ppgtt_get_guest_entry(spt, e, index) \
+	ppgtt_spt_get_entry(spt, NULL, \
+		spt->guest_page_type, e, index, true)
+
+#define ppgtt_set_guest_entry(spt, e, index) \
+	ppgtt_spt_set_entry(spt, NULL, \
+		spt->guest_page_type, e, index, true)
+
+#define ppgtt_get_shadow_entry(spt, e, index) \
+	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
+		spt->shadow_page.type, e, index, false)
+
+#define ppgtt_set_shadow_entry(spt, e, index) \
+	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
+		spt->shadow_page.type, e, index, false)
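+
+/*
+ * The guest variants above read/write the guest page table through the
+ * hypervisor GPA interface (page_table == NULL, guest == true), while
+ * the shadow variants access the shadow page directly through its
+ * kernel mapping (shadow_page.vaddr).
+ */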
+
+/**
+ * intel_vgpu_init_guest_page - init a guest page data structure
+ * @vgpu: a vGPU
+ * @p: a guest page data structure
+ * @gfn: guest memory page frame number
+ * @handler: the function to be called when the target guest memory page
+ * has been modified
+ * @data: private data attached to the tracked page
+ *
+ * This function is called when a user wants to track a guest memory page.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *p,
+		unsigned long gfn,
+		int (*handler)(void *, u64, void *, int),
+		void *data)
+{
+	INIT_HLIST_NODE(&p->node);
+
+	p->writeprotection = false;
+	p->gfn = gfn;
+	p->handler = handler;
+	p->data = data;
+	p->oos_page = NULL;
+	p->write_cnt = 0;
+
+	hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+	return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_oos_page *oos_page);
+
+/**
+ * intel_vgpu_clean_guest_page - release the resources owned by a guest page
+ * data structure
+ * @vgpu: a vGPU
+ * @p: a tracked guest page
+ *
+ * This function is called when a user stops tracking a guest memory
+ * page.
+ */
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *p)
+{
+	if (!hlist_unhashed(&p->node))
+		hash_del(&p->node);
+
+	if (p->oos_page)
+		detach_oos_page(vgpu, p->oos_page);
+
+	if (p->writeprotection)
+		intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+}
+
+/**
+ * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * @vgpu: a vGPU
+ * @gfn: guest memory page frame number
+ *
+ * This function is called when emulation logic wants to know if a trapped GFN
+ * is a tracked guest page.
+ *
+ * Returns:
+ * Pointer to guest page data structure, NULL if failed.
+ */
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+		struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	struct intel_vgpu_guest_page *p;
+
+	hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
+		p, node, gfn) {
+		if (p->gfn == gfn)
+			return p;
+	}
+	return NULL;
+}
+
+static inline int init_shadow_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_shadow_page *p, int type)
+{
+	p->vaddr = page_address(p->page);
+	p->type = type;
+
+	INIT_HLIST_NODE(&p->node);
+
+	p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
+	if (p->mfn == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
+
+	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+	return 0;
+}
+
+static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+{
+	if (!hlist_unhashed(&p->node))
+		hash_del(&p->node);
+}
+
+static inline struct intel_vgpu_shadow_page *find_shadow_page(
+		struct intel_vgpu *vgpu, unsigned long mfn)
+{
+	struct intel_vgpu_shadow_page *p;
+
+	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
+		p, node, mfn) {
+		if (p->mfn == mfn)
+			return p;
+	}
+	return NULL;
+}
+
+#define guest_page_to_ppgtt_spt(ptr) \
+	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
+
+#define shadow_page_to_ppgtt_spt(ptr) \
+	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
+
+static void *alloc_spt(gfp_t gfp_mask)
+{
+	struct intel_vgpu_ppgtt_spt *spt;
+
+	spt = kzalloc(sizeof(*spt), gfp_mask);
+	if (!spt)
+		return NULL;
+
+	spt->shadow_page.page = alloc_page(gfp_mask);
+	if (!spt->shadow_page.page) {
+		kfree(spt);
+		return NULL;
+	}
+	return spt;
+}
+
+static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+	__free_page(spt->shadow_page.page);
+	kfree(spt);
+}
+
+static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
+
+	clean_shadow_page(&spt->shadow_page);
+	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+	list_del_init(&spt->post_shadow_list);
+
+	free_spt(spt);
+}
+
+static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
+{
+	struct hlist_node *n;
+	struct intel_vgpu_shadow_page *sp;
+	int i;
+
+	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
+		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
+}
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+		u64 pa, void *p_data, int bytes);
+
+static int ppgtt_write_protection_handler(void *gp, u64 pa,
+		void *p_data, int bytes)
+{
+	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+
+	if (bytes != 4 && bytes != 8)
+		return -EINVAL;
+
+	if (!gpt->writeprotection)
+		return -EINVAL;
+
+	return ppgtt_handle_guest_write_page_table_bytes(gp,
+		pa, p_data, bytes);
+}
+
+static int reclaim_one_mm(struct intel_gvt *gvt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
+		struct intel_vgpu *vgpu, int type, unsigned long gfn)
+{
+	struct intel_vgpu_ppgtt_spt *spt = NULL;
+	int ret;
+
+retry:
+	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
+	if (!spt) {
+		if (reclaim_one_mm(vgpu->gvt))
+			goto retry;
+
+		gvt_err("fail to allocate ppgtt shadow page\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spt->vgpu = vgpu;
+	spt->guest_page_type = type;
+	atomic_set(&spt->refcount, 1);
+	INIT_LIST_HEAD(&spt->post_shadow_list);
+
+	/*
+	 * TODO: the guest page type may differ from the shadow page
+	 *	 type once PSE pages are supported.
+	 */
+	ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+	if (ret) {
+		gvt_err("fail to initialize shadow page for spt\n");
+		goto err;
+	}
+
+	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+			gfn, ppgtt_write_protection_handler, NULL);
+	if (ret) {
+		gvt_err("fail to initialize guest page for spt\n");
+		goto err;
+	}
+
+	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+	return spt;
+err:
+	ppgtt_free_shadow_page(spt);
+	return ERR_PTR(ret);
+}
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
+		struct intel_vgpu *vgpu, unsigned long mfn)
+{
+	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
+
+	if (p)
+		return shadow_page_to_ppgtt_spt(p);
+
+	gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
+			vgpu->id, mfn);
+	return NULL;
+}
+
+#define pt_entry_size_shift(spt) \
+	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
+
+#define pt_entries(spt) \
+	(GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+
+#define for_each_present_guest_entry(spt, e, i) \
+	for (i = 0; i < pt_entries(spt); i++) \
+	if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+		ppgtt_get_guest_entry(spt, e, i)))
+
+#define for_each_present_shadow_entry(spt, e, i) \
+	for (i = 0; i < pt_entries(spt); i++) \
+	if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+		ppgtt_get_shadow_entry(spt, e, i)))
+
+static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+	int v = atomic_read(&spt->refcount);
+
+	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
+
+	atomic_inc(&spt->refcount);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
+		struct intel_gvt_gtt_entry *e)
+{
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_vgpu_ppgtt_spt *s;
+	intel_gvt_gtt_type_t cur_pt_type;
+
+	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
+		return -EINVAL;
+
+	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		cur_pt_type = get_next_pt_type(e->type) + 1;
+		if (ops->get_pfn(e) ==
+			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
+			return 0;
+	}
+	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+	if (!s) {
+		gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
+				vgpu->id, ops->get_pfn(e));
+		return -ENXIO;
+	}
+	return ppgtt_invalidate_shadow_page(s);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+	struct intel_gvt_gtt_entry e;
+	unsigned long index;
+	int ret;
+	int v = atomic_read(&spt->refcount);
+
+	trace_spt_change(spt->vgpu->id, "die", spt,
+			spt->guest_page.gfn, spt->shadow_page.type);
+
+	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+
+	if (atomic_dec_return(&spt->refcount) > 0)
+		return 0;
+
+	if (gtt_type_is_pte_pt(spt->shadow_page.type))
+		goto release;
+
+	for_each_present_shadow_entry(spt, &e, index) {
+		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
+			gvt_err("GVT doesn't support the PSE bit yet\n");
+			return -EINVAL;
+		}
+		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
+				spt->vgpu, &e);
+		if (ret)
+			goto fail;
+	}
+release:
+	trace_spt_change(spt->vgpu->id, "release", spt,
+			spt->guest_page.gfn, spt->shadow_page.type);
+	ppgtt_free_shadow_page(spt);
+	return 0;
+fail:
+	gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
+			spt->vgpu->id, spt, e.val64, e.type);
+	return ret;
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
+		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
+{
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_vgpu_ppgtt_spt *s = NULL;
+	struct intel_vgpu_guest_page *g;
+	int ret;
+
+	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
+	if (g) {
+		s = guest_page_to_ppgtt_spt(g);
+		ppgtt_get_shadow_page(s);
+	} else {
+		int type = get_next_pt_type(we->type);
+
+		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
+		if (IS_ERR(s)) {
+			ret = PTR_ERR(s);
+			goto fail;
+		}
+
+		ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+		if (ret)
+			goto fail;
+
+		ret = ppgtt_populate_shadow_page(s);
+		if (ret)
+			goto fail;
+
+		trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+			s->shadow_page.type);
+	}
+	return s;
+fail:
+	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+			vgpu->id, s, we->val64, we->type);
+	return ERR_PTR(ret);
+}
+
+static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
+		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
+{
+	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
+
+	se->type = ge->type;
+	se->val64 = ge->val64;
+
+	ops->set_pfn(se, s->shadow_page.mfn);
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_vgpu_ppgtt_spt *s;
+	struct intel_gvt_gtt_entry se, ge;
+	unsigned long i;
+	int ret;
+
+	trace_spt_change(spt->vgpu->id, "born", spt,
+			spt->guest_page.gfn, spt->shadow_page.type);
+
+	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
+		for_each_present_guest_entry(spt, &ge, i) {
+			ret = gtt_entry_p2m(vgpu, &ge, &se);
+			if (ret)
+				goto fail;
+			ppgtt_set_shadow_entry(spt, &se, i);
+		}
+		return 0;
+	}
+
+	for_each_present_guest_entry(spt, &ge, i) {
+		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
+			gvt_err("GVT doesn't support the PSE bit yet\n");
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+		if (IS_ERR(s)) {
+			ret = PTR_ERR(s);
+			goto fail;
+		}
+		ppgtt_get_shadow_entry(spt, &se, i);
+		ppgtt_generate_shadow_entry(&se, s, &ge);
+		ppgtt_set_shadow_entry(spt, &se, i);
+	}
+	return 0;
+fail:
+	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+			vgpu->id, spt, ge.val64, ge.type);
+	return ret;
+}
+
+static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
+		struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_entry e;
+	int ret;
+
+	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type,
+		we->val64, index);
+
+	ppgtt_get_shadow_entry(spt, &e, index);
+	if (!ops->test_present(&e))
+		return 0;
+
+	if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+		return 0;
+
+	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
+		struct intel_vgpu_guest_page *g =
+			intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
+		if (!g) {
+			gvt_err("fail to find guest page\n");
+			ret = -ENXIO;
+			goto fail;
+		}
+		ret = ppgtt_invalidate_shadow_page(guest_page_to_ppgtt_spt(g));
+		if (ret)
+			goto fail;
+	}
+	ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
+	ppgtt_set_shadow_entry(spt, &e, index);
+	return 0;
+fail:
+	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+			vgpu->id, spt, we->val64, we->type);
+	return ret;
+}
+
+static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
+		struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt_gtt_entry m;
+	struct intel_vgpu_ppgtt_spt *s;
+	int ret;
+
+	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
+		we->val64, index);
+
+	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
+		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
+		if (IS_ERR(s)) {
+			ret = PTR_ERR(s);
+			goto fail;
+		}
+		ppgtt_get_shadow_entry(spt, &m, index);
+		ppgtt_generate_shadow_entry(&m, s, we);
+		ppgtt_set_shadow_entry(spt, &m, index);
+	} else {
+		ret = gtt_entry_p2m(vgpu, we, &m);
+		if (ret)
+			goto fail;
+		ppgtt_set_shadow_entry(spt, &m, index);
+	}
+	return 0;
+fail:
+	gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
+			spt, we->val64, we->type);
+	return ret;
+}
+
+static int sync_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_oos_page *oos_page)
+{
+	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	struct intel_vgpu_ppgtt_spt *spt =
+		guest_page_to_ppgtt_spt(oos_page->guest_page);
+	struct intel_gvt_gtt_entry old, new, m;
+	int index;
+	int ret;
+
+	trace_oos_change(vgpu->id, "sync", oos_page->id,
+			oos_page->guest_page, spt->guest_page_type);
+
+	old.type = new.type = get_entry_type(spt->guest_page_type);
+	old.val64 = new.val64 = 0;
+
+	for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
+		index++) {
+		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
+		ops->get_entry(NULL, &new, index, true,
+			oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+
+		if (old.val64 == new.val64
+			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
+			continue;
+
+		trace_oos_sync(vgpu->id, oos_page->id,
+				oos_page->guest_page, spt->guest_page_type,
+				new.val64, index);
+
+		ret = gtt_entry_p2m(vgpu, &new, &m);
+		if (ret)
+			return ret;
+
+		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
+		ppgtt_set_shadow_entry(spt, &m, index);
+	}
+
+	oos_page->guest_page->write_cnt = 0;
+	list_del_init(&spt->post_shadow_list);
+	return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_oos_page *oos_page)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_vgpu_ppgtt_spt *spt =
+		guest_page_to_ppgtt_spt(oos_page->guest_page);
+
+	trace_oos_change(vgpu->id, "detach", oos_page->id,
+			oos_page->guest_page, spt->guest_page_type);
+
+	oos_page->guest_page->write_cnt = 0;
+	oos_page->guest_page->oos_page = NULL;
+	oos_page->guest_page = NULL;
+
+	list_del_init(&oos_page->vm_list);
+	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
+
+	return 0;
+}
+
+static int attach_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_oos_page *oos_page,
+		struct intel_vgpu_guest_page *gpt)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ret;
+
+	ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
+		oos_page->mem, GTT_PAGE_SIZE);
+	if (ret)
+		return ret;
+
+	oos_page->guest_page = gpt;
+	gpt->oos_page = oos_page;
+
+	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
+
+	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
+			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+	return 0;
+}
+
+static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *gpt)
+{
+	int ret;
+
+	ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+	if (ret)
+		return ret;
+
+	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
+			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+	list_del_init(&gpt->oos_page->vm_list);
+	return sync_oos_page(vgpu, gpt->oos_page);
+}
+
+static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *gpt)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt *gtt = &gvt->gtt;
+	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+	int ret;
+
+	WARN(oos_page, "shadow PPGTT page already has an oos page\n");
+
+	if (list_empty(&gtt->oos_page_free_list_head)) {
+		oos_page = container_of(gtt->oos_page_use_list_head.next,
+			struct intel_vgpu_oos_page, list);
+		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+		if (ret)
+			return ret;
+		ret = detach_oos_page(vgpu, oos_page);
+		if (ret)
+			return ret;
+	} else {
+		oos_page = container_of(gtt->oos_page_free_list_head.next,
+			struct intel_vgpu_oos_page, list);
+	}
+	return attach_oos_page(vgpu, oos_page, gpt);
+}
+
+static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *gpt)
+{
+	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+
+	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
+		return -EINVAL;
+
+	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
+			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
+	return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+}
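+
+/*
+ * Out-of-sync (OOS) lifecycle, as implemented above: attach_oos_page()
+ * snapshots the guest page table into oos_page->mem;
+ * ppgtt_set_guest_page_oos() then drops the write protection so the
+ * guest can modify the page without trapping; ppgtt_set_guest_page_sync()
+ * re-arms the write protection and sync_oos_page() diffs the snapshot
+ * against the current guest page table, pushing only changed entries
+ * into the shadow page table.
+ */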
+
+/**
+ * intel_vgpu_sync_oos_pages - sync all out-of-sync shadow pages for a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to sync all the out-of-sync shadow pages of the vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
+{
+	struct list_head *pos, *n;
+	struct intel_vgpu_oos_page *oos_page;
+	int ret;
+
+	if (!enable_out_of_sync)
+		return 0;
+
+	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
+		oos_page = container_of(pos,
+				struct intel_vgpu_oos_page, vm_list);
+		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * The heart of PPGTT shadow page table.
+ */
+static int ppgtt_handle_guest_write_page_table(
+		struct intel_vgpu_guest_page *gpt,
+		struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_entry ge;
+
+	int old_present, new_present;
+	int ret;
+
+	ppgtt_get_guest_entry(spt, &ge, index);
+
+	old_present = ops->test_present(&ge);
+	new_present = ops->test_present(we);
+
+	ppgtt_set_guest_entry(spt, we, index);
+
+	if (old_present) {
+		ret = ppgtt_handle_guest_entry_removal(gpt, &ge, index);
+		if (ret)
+			goto fail;
+	}
+	if (new_present) {
+		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
+		if (ret)
+			goto fail;
+	}
+	return 0;
+fail:
+	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
+			vgpu->id, spt, we->val64, we->type);
+	return ret;
+}
+
+static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
+{
+	return enable_out_of_sync
+		&& gtt_type_is_pte_pt(
+			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
+		&& gpt->write_cnt >= 2;
+}
+
+static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
+		unsigned long index)
+{
+	set_bit(index, spt->post_shadow_bitmap);
+	if (!list_empty(&spt->post_shadow_list))
+		return;
+
+	list_add_tail(&spt->post_shadow_list,
+			&spt->vgpu->gtt.post_shadow_list_head);
+}
+
+/**
+ * intel_vgpu_flush_post_shadow - flush the post shadow transactions
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to flush all pending post-shadow transactions for the vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
+{
+	struct list_head *pos, *n;
+	struct intel_vgpu_ppgtt_spt *spt;
+	struct intel_gvt_gtt_entry ge, e;
+	unsigned long index;
+	int ret;
+
+	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
+		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
+				post_shadow_list);
+
+		for_each_set_bit(index, spt->post_shadow_bitmap,
+				GTT_ENTRY_NUM_IN_ONE_PAGE) {
+			ppgtt_get_guest_entry(spt, &ge, index);
+			e = ge;
+			e.val64 = 0;
+			ppgtt_set_guest_entry(spt, &e, index);
+
+			ret = ppgtt_handle_guest_write_page_table(
+					&spt->guest_page, &ge, index);
+			if (ret)
+				return ret;
+			clear_bit(index, spt->post_shadow_bitmap);
+		}
+		list_del_init(&spt->post_shadow_list);
+	}
+	return 0;
+}
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+		u64 pa, void *p_data, int bytes)
+{
+	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+	struct intel_gvt_gtt_entry we;
+	unsigned long index;
+	int ret;
+
+	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
+
+	ppgtt_get_guest_entry(spt, &we, index);
+	memcpy((void *)&we.val64 + (pa & (info->gtt_entry_size - 1)),
+			p_data, bytes);
+
+	ops->test_pse(&we);
+
+	if (bytes == info->gtt_entry_size) {
+		ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
+		if (ret)
+			return ret;
+	} else {
+		struct intel_gvt_gtt_entry ge;
+
+		ppgtt_get_guest_entry(spt, &ge, index);
+
+		if (!test_bit(index, spt->post_shadow_bitmap)) {
+			ret = ppgtt_handle_guest_entry_removal(gpt,
+					&ge, index);
+			if (ret)
+				return ret;
+		}
+
+		ppgtt_set_post_shadow(spt, index);
+		ppgtt_set_guest_entry(spt, &we, index);
+	}
+
+	if (!enable_out_of_sync)
+		return 0;
+
+	gpt->write_cnt++;
+
+	if (gpt->oos_page)
+		ops->set_entry(gpt->oos_page->mem, &we, index,
+				false, 0, vgpu);
+
+	if (can_do_out_of_sync(gpt)) {
+		if (!gpt->oos_page)
+			ppgtt_allocate_oos_page(vgpu, gpt);
+
+		ret = ppgtt_set_guest_page_oos(vgpu, gpt);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * mm page table allocation policy for bdw+
+ *  - for ggtt, only virtual page table will be allocated.
+ *  - for ppgtt, dedicated virtual/shadow page table will be allocated.
+ */
+static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
+{
+	struct intel_vgpu *vgpu = mm->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	const struct intel_gvt_device_info *info = &gvt->device_info;
+	void *mem;
+
+	if (mm->type == INTEL_GVT_MM_PPGTT) {
+		mm->page_table_entry_cnt = 4;
+		mm->page_table_entry_size = mm->page_table_entry_cnt *
+			info->gtt_entry_size;
+		mem = kzalloc(mm->has_shadow_page_table ?
+			mm->page_table_entry_size * 2
+				: mm->page_table_entry_size,
+			GFP_ATOMIC);
+		if (!mem)
+			return -ENOMEM;
+		mm->virtual_page_table = mem;
+		if (!mm->has_shadow_page_table)
+			return 0;
+		mm->shadow_page_table = mem + mm->page_table_entry_size;
+	} else if (mm->type == INTEL_GVT_MM_GGTT) {
+		mm->page_table_entry_cnt =
+			(gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+		mm->page_table_entry_size = mm->page_table_entry_cnt *
+			info->gtt_entry_size;
+		mem = vzalloc(mm->page_table_entry_size);
+		if (!mem)
+			return -ENOMEM;
+		mm->virtual_page_table = mem;
+	}
+	return 0;
+}
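+
+/*
+ * Note: the PPGTT root table above is always sized for four entries; a
+ * 3-level guest uses all four as PDP root pointers, while a 4-level
+ * guest only uses entry 0 as the PML4 root (see the walk in
+ * intel_vgpu_gma_to_gpa()).
+ */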
+
+static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
+{
+	if (mm->type == INTEL_GVT_MM_PPGTT) {
+		kfree(mm->virtual_page_table);
+	} else if (mm->type == INTEL_GVT_MM_GGTT) {
+		if (mm->virtual_page_table)
+			vfree(mm->virtual_page_table);
+	}
+	mm->virtual_page_table = mm->shadow_page_table = NULL;
+}
+
+static void invalidate_mm(struct intel_vgpu_mm *mm)
+{
+	struct intel_vgpu *vgpu = mm->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt *gtt = &gvt->gtt;
+	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+	struct intel_gvt_gtt_entry se;
+	int i;
+
+	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+		return;
+
+	for (i = 0; i < mm->page_table_entry_cnt; i++) {
+		ppgtt_get_shadow_root_entry(mm, &se, i);
+		if (!ops->test_present(&se))
+			continue;
+		ppgtt_invalidate_shadow_page_by_shadow_entry(
+				vgpu, &se);
+		se.val64 = 0;
+		ppgtt_set_shadow_root_entry(mm, &se, i);
+
+		trace_gpt_change(vgpu->id, "destroy root pointer",
+				NULL, se.type, se.val64, i);
+	}
+	mm->shadowed = false;
+}
+
+/**
+ * intel_vgpu_destroy_mm - destroy a mm object
+ * @mm_ref: the kref embedded in the mm object
+ *
+ * This function is used to destroy a mm object for a vGPU
+ *
+ */
+void intel_vgpu_destroy_mm(struct kref *mm_ref)
+{
+	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+	struct intel_vgpu *vgpu = mm->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt *gtt = &gvt->gtt;
+
+	if (!mm->initialized)
+		goto out;
+
+	list_del(&mm->list);
+	list_del(&mm->lru_list);
+
+	if (mm->has_shadow_page_table)
+		invalidate_mm(mm);
+
+	gtt->mm_free_page_table(mm);
+out:
+	kfree(mm);
+}
+
+static int shadow_mm(struct intel_vgpu_mm *mm)
+{
+	struct intel_vgpu *vgpu = mm->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt *gtt = &gvt->gtt;
+	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+	struct intel_vgpu_ppgtt_spt *spt;
+	struct intel_gvt_gtt_entry ge, se;
+	int i;
+	int ret;
+
+	if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+		return 0;
+
+	mm->shadowed = true;
+
+	for (i = 0; i < mm->page_table_entry_cnt; i++) {
+		ppgtt_get_guest_root_entry(mm, &ge, i);
+		if (!ops->test_present(&ge))
+			continue;
+
+		trace_gpt_change(vgpu->id, __func__, NULL,
+				ge.type, ge.val64, i);
+
+		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+		if (IS_ERR(spt)) {
+			gvt_err("fail to populate guest root pointer\n");
+			ret = PTR_ERR(spt);
+			goto fail;
+		}
+		ppgtt_generate_shadow_entry(&se, spt, &ge);
+		ppgtt_set_shadow_root_entry(mm, &se, i);
+
+		trace_gpt_change(vgpu->id, "populate root pointer",
+				NULL, se.type, se.val64, i);
+	}
+	return 0;
+fail:
+	invalidate_mm(mm);
+	return ret;
+}
+
+/**
+ * intel_vgpu_create_mm - create a mm object for a vGPU
+ * @vgpu: a vGPU
+ * @mm_type: mm object type, should be PPGTT or GGTT
+ * @virtual_page_table: page table root pointers. May be NULL if the caller
+ *	wants to populate the shadow later.
+ * @page_table_level: describe the page table level of the mm object
+ * @pde_base_index: pde root pointer base in GGTT MMIO.
+ *
+ * This function is used to create a mm object for a vGPU.
+ *
+ * Returns:
+ * Pointer to the mm object on success, ERR_PTR-encoded error code on failure.
+ */
+struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+		int mm_type, void *virtual_page_table, int page_table_level,
+		u32 pde_base_index)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt *gtt = &gvt->gtt;
+	struct intel_vgpu_mm *mm;
+	int ret;
+
+	mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+	if (!mm) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mm->type = mm_type;
+
+	if (page_table_level == 1)
+		mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
+	else if (page_table_level == 3)
+		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+	else if (page_table_level == 4)
+		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+	else {
+		WARN_ON(1);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	mm->page_table_level = page_table_level;
+	mm->pde_base_index = pde_base_index;
+
+	mm->vgpu = vgpu;
+	mm->has_shadow_page_table = (mm_type == INTEL_GVT_MM_PPGTT);
+
+	kref_init(&mm->ref);
+	atomic_set(&mm->pincount, 0);
+	INIT_LIST_HEAD(&mm->list);
+	INIT_LIST_HEAD(&mm->lru_list);
+	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
+
+	ret = gtt->mm_alloc_page_table(mm);
+	if (ret) {
+		gvt_err("fail to allocate page table for mm\n");
+		goto fail;
+	}
+
+	mm->initialized = true;
+
+	if (virtual_page_table)
+		memcpy(mm->virtual_page_table, virtual_page_table,
+				mm->page_table_entry_size);
+
+	if (mm->has_shadow_page_table) {
+		ret = shadow_mm(mm);
+		if (ret)
+			goto fail;
+		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+	}
+	return mm;
+fail:
+	gvt_err("fail to create mm\n");
+	if (mm)
+		intel_gvt_mm_unreference(mm);
+	return ERR_PTR(ret);
+}
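+
+/*
+ * Typical use, as in intel_vgpu_g2v_create_ppgtt_mm() below: hand over
+ * the guest root pointers and the page table level, and the shadow is
+ * populated right away:
+ *
+ *	mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, pdp, 4, 0);
+ *	if (IS_ERR(mm))
+ *		return PTR_ERR(mm);
+ */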
+
+/**
+ * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
+ * @mm: a vGPU mm object
+ *
+ * This function is called when a user no longer needs a vGPU mm object.
+ */
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+{
+	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+		return;
+
+	atomic_dec(&mm->pincount);
+}
+
+/**
+ * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
+ * @mm: a vGPU mm object
+ *
+ * This function is called when a user wants to use a vGPU mm object. If this
+ * mm object hasn't been shadowed yet, the shadow will be populated at this
+ * time.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
+{
+	int ret;
+
+	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+		return 0;
+
+	atomic_inc(&mm->pincount);
+
+	if (!mm->shadowed) {
+		ret = shadow_mm(mm);
+		if (ret)
+			return ret;
+	}
+
+	list_del_init(&mm->lru_list);
+	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
+	return 0;
+}
+
+static int reclaim_one_mm(struct intel_gvt *gvt)
+{
+	struct intel_vgpu_mm *mm;
+	struct list_head *pos, *n;
+
+	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+
+		if (mm->type != INTEL_GVT_MM_PPGTT)
+			continue;
+		if (atomic_read(&mm->pincount))
+			continue;
+
+		list_del_init(&mm->lru_list);
+		invalidate_mm(mm);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * GMA translation APIs.
+ */
+static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
+{
+	struct intel_vgpu *vgpu = mm->vgpu;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_vgpu_ppgtt_spt *s;
+
+	if (WARN_ON(!mm->has_shadow_page_table))
+		return -EINVAL;
+
+	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+	if (!s)
+		return -ENXIO;
+
+	if (!guest)
+		ppgtt_get_shadow_entry(s, e, index);
+	else
+		ppgtt_get_guest_entry(s, e, index);
+	return 0;
+}
+
+/**
+ * intel_vgpu_gma_to_gpa - translate a gma to GPA
+ * @mm: a mm object, either a PPGTT or a GGTT mm object
+ * @gma: graphics memory address in this mm object
+ *
+ * This function is used to translate a graphics memory address in a specific
+ * graphics memory space to a guest physical address.
+ *
+ * Returns:
+ * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
+{
+	struct intel_vgpu *vgpu = mm->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
+	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
+	unsigned long gma_index[4];
+	struct intel_gvt_gtt_entry e;
+	int i, index;
+	int ret;
+
+	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
+		return INTEL_GVT_INVALID_ADDR;
+
+	if (mm->type == INTEL_GVT_MM_GGTT) {
+		if (!vgpu_gmadr_is_valid(vgpu, gma))
+			goto err;
+
+		ggtt_get_guest_entry(mm, &e,
+			gma_ops->gma_to_ggtt_pte_index(gma));
+		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+			+ (gma & ~GTT_PAGE_MASK);
+
+		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
+		return gpa;
+	}
+
+	switch (mm->page_table_level) {
+	case 4:
+		ppgtt_get_shadow_root_entry(mm, &e, 0);
+		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+		gma_index[2] = gma_ops->gma_to_pde_index(gma);
+		gma_index[3] = gma_ops->gma_to_pte_index(gma);
+		index = 4;
+		break;
+	case 3:
+		ppgtt_get_shadow_root_entry(mm, &e,
+				gma_ops->gma_to_l3_pdp_index(gma));
+		gma_index[0] = gma_ops->gma_to_pde_index(gma);
+		gma_index[1] = gma_ops->gma_to_pte_index(gma);
+		index = 2;
+		break;
+	case 2:
+		ppgtt_get_shadow_root_entry(mm, &e,
+				gma_ops->gma_to_pde_index(gma));
+		gma_index[0] = gma_ops->gma_to_pte_index(gma);
+		index = 1;
+		break;
+	default:
+		WARN_ON(1);
+		goto err;
+	}
+
+	/* walk into the shadow page table and get gpa from guest entry */
+	for (i = 0; i < index; i++) {
+		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+			(i == index - 1));
+		if (ret)
+			goto err;
+	}
+
+	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+		+ (gma & ~GTT_PAGE_MASK);
+
+	trace_gma_translate(vgpu->id, "ppgtt", 0,
+			mm->page_table_level, gma, gpa);
+	return gpa;
+err:
+	gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+	return INTEL_GVT_INVALID_ADDR;
+}
+
+static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+	unsigned int off, void *p_data, unsigned int bytes)
+{
+	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+	unsigned long index = off >> info->gtt_entry_size_shift;
+	struct intel_gvt_gtt_entry e;
+
+	if (bytes != 4 && bytes != 8)
+		return -EINVAL;
+
+	ggtt_get_guest_entry(ggtt_mm, &e, index);
+	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
+			bytes);
+	return 0;
+}
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data will be returned to guest
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register read
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+	void *p_data, unsigned int bytes)
+{
+	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+	int ret;
+
+	if (bytes != 4 && bytes != 8)
+		return -EINVAL;
+
+	off -= info->gtt_start_offset;
+	ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
+	return ret;
+}
+
+static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+	void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	const struct intel_gvt_device_info *info = &gvt->device_info;
+	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
+	unsigned long gma;
+	struct intel_gvt_gtt_entry e, m;
+	int ret;
+
+	if (bytes != 4 && bytes != 8)
+		return -EINVAL;
+
+	gma = g_gtt_index << GTT_PAGE_SHIFT;
+
+	/* the VM may configure the whole GM space when ballooning is used */
+	if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
+				"vgpu%d: found oob ggtt write, offset %x\n",
+				vgpu->id, off)) {
+		return 0;
+	}
+
+	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
+			bytes);
+
+	if (ops->test_present(&e)) {
+		ret = gtt_entry_p2m(vgpu, &e, &m);
+		if (ret) {
+			gvt_err("vgpu%d: fail to translate guest gtt entry\n",
+					vgpu->id);
+			return ret;
+		}
+	} else {
+		m = e;
+		m.val64 = 0;
+	}
+
+	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+	return 0;
+}
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data from guest write
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register write
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+	void *p_data, unsigned int bytes)
+{
+	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+	int ret;
+
+	if (bytes != 4 && bytes != 8)
+		return -EINVAL;
+
+	off -= info->gtt_start_offset;
+	ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
+	return ret;
+}
+
+static int alloc_scratch_pages(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t type)
+{
+	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	int page_entry_num = GTT_PAGE_SIZE >>
+				vgpu->gvt->device_info.gtt_entry_size_shift;
+	struct page *scratch_pt;
+	unsigned long mfn;
+	int i;
+	void *p;
+
+	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+		return -EINVAL;
+
+	scratch_pt = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!scratch_pt) {
+		gvt_err("fail to allocate scratch page\n");
+		return -ENOMEM;
+	}
+
+	p = kmap_atomic(scratch_pt);
+	mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+	if (mfn == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
+		kunmap_atomic(p);
+		__free_page(scratch_pt);
+		return -EFAULT;
+	}
+	gtt->scratch_pt[type].page_mfn = mfn;
+	gtt->scratch_pt[type].page = scratch_pt;
+	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
+			vgpu->id, type, mfn);
+
+	/* Build the tree by filling the scratch pt with entries that point
+	 * to the next-level scratch pt or scratch page. scratch_pt[type]
+	 * holds the scratch pt/scratch page used by page tables of level
+	 * 'type'.
+	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
+	 * GTT_TYPE_PPGTT_PDE_PT level pt; this scratch_pt itself is of type
+	 * GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
+	 */
+	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+		struct intel_gvt_gtt_entry se;
+
+		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
+		se.type = get_entry_type(type - 1);
+		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
+
+		/* The entry parameters (present/writable/cache type) are
+		 * set to match i915's scratch page tree.
+		 */
+		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+		if (type == GTT_TYPE_PPGTT_PDE_PT)
+			se.val64 |= PPAT_CACHED_INDEX;
+
+		for (i = 0; i < page_entry_num; i++)
+			ops->set_entry(p, &se, i, false, 0, vgpu);
+	}
+
+	kunmap_atomic(p);
+
+	return 0;
+}
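+
+/*
+ * The resulting scratch hierarchy, per vGPU:
+ *
+ *   scratch_pt[PML4_PT] --entries--> scratch_pt[PDP_PT]
+ *   scratch_pt[PDP_PT]  --entries--> scratch_pt[PDE_PT]
+ *   scratch_pt[PDE_PT]  --entries--> scratch_pt[PTE_PT]
+ *   scratch_pt[PTE_PT]  (zero-filled leaf page)
+ *
+ * so any not-present guest entry can be pointed at a scratch page of
+ * the matching level.
+ */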
+
+static int release_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+	int i;
+
+	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+		if (vgpu->gtt.scratch_pt[i].page != NULL) {
+			__free_page(vgpu->gtt.scratch_pt[i].page);
+			vgpu->gtt.scratch_pt[i].page = NULL;
+			vgpu->gtt.scratch_pt[i].page_mfn = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int create_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+	int i, ret;
+
+	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+		ret = alloc_scratch_pages(vgpu, i);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	release_scratch_page_tree(vgpu);
+	return ret;
+}
+
+/**
+ * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize per-vGPU graphics memory virtualization
+ * components.
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+	struct intel_vgpu_mm *ggtt_mm;
+
+	hash_init(gtt->guest_page_hash_table);
+	hash_init(gtt->shadow_page_hash_table);
+
+	INIT_LIST_HEAD(&gtt->mm_list_head);
+	INIT_LIST_HEAD(&gtt->oos_page_list_head);
+	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
+
+	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
+			NULL, 1, 0);
+	if (IS_ERR(ggtt_mm)) {
+		gvt_err("fail to create mm for ggtt.\n");
+		return PTR_ERR(ggtt_mm);
+	}
+
+	gtt->ggtt_mm = ggtt_mm;
+
+	return create_scratch_page_tree(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up per-vGPU graphics memory virtualization
+ * components.
+ */
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
+{
+	struct list_head *pos, *n;
+	struct intel_vgpu_mm *mm;
+
+	ppgtt_free_all_shadow_page(vgpu);
+	release_scratch_page_tree(vgpu);
+
+	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, list);
+		vgpu->gvt->gtt.mm_free_page_table(mm);
+		list_del(&mm->list);
+		list_del(&mm->lru_list);
+		kfree(mm);
+	}
+}
+
+static void clean_spt_oos(struct intel_gvt *gvt)
+{
+	struct intel_gvt_gtt *gtt = &gvt->gtt;
+	struct list_head *pos, *n;
+	struct intel_vgpu_oos_page *oos_page;
+
+	WARN(!list_empty(&gtt->oos_page_use_list_head),
+		"someone is still using oos page\n");
+
+	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
+		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
+		list_del(&oos_page->list);
+		kfree(oos_page);
+	}
+}
+
+static int setup_spt_oos(struct intel_gvt *gvt)
+{
+	struct intel_gvt_gtt *gtt = &gvt->gtt;
+	struct intel_vgpu_oos_page *oos_page;
+	int i;
+	int ret;
+
+	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
+	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
+
+	for (i = 0; i < preallocated_oos_pages; i++) {
+		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
+		if (!oos_page) {
+			gvt_err("fail to pre-allocate oos page\n");
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		INIT_LIST_HEAD(&oos_page->list);
+		INIT_LIST_HEAD(&oos_page->vm_list);
+		oos_page->id = i;
+		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
+	}
+
+	gvt_dbg_mm("%d oos pages preallocated\n", i);
+
+	return 0;
+fail:
+	clean_spt_oos(gvt);
+	return ret;
+}
+
+/**
+ * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ * @root_entry: PPGTT page table root pointers
+ *
+ * This function is used to find a PPGTT mm object from mm object pool
+ *
+ * Returns:
+ * pointer to mm object on success, NULL if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+		int page_table_level, void *root_entry)
+{
+	struct list_head *pos;
+	struct intel_vgpu_mm *mm;
+	u64 *src, *dst;
+
+	list_for_each(pos, &vgpu->gtt.mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, list);
+		if (mm->type != INTEL_GVT_MM_PPGTT)
+			continue;
+
+		if (mm->page_table_level != page_table_level)
+			continue;
+
+		src = root_entry;
+		dst = mm->virtual_page_table;
+
+		if (page_table_level == 3) {
+			if (src[0] == dst[0]
+					&& src[1] == dst[1]
+					&& src[2] == dst[2]
+					&& src[3] == dst[3])
+				return mm;
+		} else {
+			if (src[0] == dst[0])
+				return mm;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to create a PPGTT mm object upon a guest-to-GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+		int page_table_level)
+{
+	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	struct intel_vgpu_mm *mm;
+
+	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+		return -EINVAL;
+
+	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+	if (mm) {
+		intel_gvt_mm_reference(mm);
+	} else {
+		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
+				pdp, page_table_level, 0);
+		if (IS_ERR(mm)) {
+			gvt_err("fail to create mm\n");
+			return PTR_ERR(mm);
+		}
+	}
+	return 0;
+}
+
+/**
+ * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to destroy a PPGTT mm object upon a guest-to-GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+		int page_table_level)
+{
+	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	struct intel_vgpu_mm *mm;
+
+	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+		return -EINVAL;
+
+	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+	if (!mm) {
+		gvt_err("fail to find ppgtt instance.\n");
+		return -EINVAL;
+	}
+	intel_gvt_mm_unreference(mm);
+	return 0;
+}
+
+/**
+ * intel_gvt_init_gtt - initialize mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage, to initialize
+ * the mm components of a GVT device.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_gvt_init_gtt(struct intel_gvt *gvt)
+{
+	int ret;
+
+	gvt_dbg_core("init gtt\n");
+
+	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
+		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
+		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
+	} else {
+		return -ENODEV;
+	}
+
+	if (enable_out_of_sync) {
+		ret = setup_spt_oos(gvt);
+		if (ret) {
+			gvt_err("fail to initialize SPT oos\n");
+			return ret;
+		}
+	}
+	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+	return 0;
+}
+
+/**
+ * intel_gvt_clean_gtt - clean up mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage, to clean up
+ * the mm components of a GVT device.
+ *
+ */
+void intel_gvt_clean_gtt(struct intel_gvt *gvt)
+{
+	if (enable_out_of_sync)
+		clean_spt_oos(gvt);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
new file mode 100644
index 0000000..d250013
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *    Zhenyu Wang <zhenyuw@linux.intel.com>
+ *    Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef _GVT_GTT_H_
+#define _GVT_GTT_H_
+
+#define GTT_PAGE_SHIFT		12
+#define GTT_PAGE_SIZE		(1UL << GTT_PAGE_SHIFT)
+#define GTT_PAGE_MASK		(~(GTT_PAGE_SIZE-1))
+
+struct intel_vgpu_mm;
+
+#define INTEL_GVT_GTT_HASH_BITS 8
+#define INTEL_GVT_INVALID_ADDR (~0UL)
+
+struct intel_gvt_gtt_entry {
+	u64 val64;
+	int type;
+};
+
+struct intel_gvt_gtt_pte_ops {
+	struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
+		struct intel_gvt_gtt_entry *e,
+		unsigned long index, bool hypervisor_access, unsigned long gpa,
+		struct intel_vgpu *vgpu);
+	struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
+		struct intel_gvt_gtt_entry *e,
+		unsigned long index, bool hypervisor_access, unsigned long gpa,
+		struct intel_vgpu *vgpu);
+	bool (*test_present)(struct intel_gvt_gtt_entry *e);
+	void (*clear_present)(struct intel_gvt_gtt_entry *e);
+	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
+	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
+};
+
+struct intel_gvt_gtt_gma_ops {
+	unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
+	unsigned long (*gma_to_pte_index)(unsigned long gma);
+	unsigned long (*gma_to_pde_index)(unsigned long gma);
+	unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
+	unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
+	unsigned long (*gma_to_pml4_index)(unsigned long gma);
+};
+
+struct intel_gvt_gtt {
+	struct intel_gvt_gtt_pte_ops *pte_ops;
+	struct intel_gvt_gtt_gma_ops *gma_ops;
+	int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
+	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
+	struct list_head oos_page_use_list_head;
+	struct list_head oos_page_free_list_head;
+	struct list_head mm_lru_list_head;
+};
+
+enum {
+	INTEL_GVT_MM_GGTT = 0,
+	INTEL_GVT_MM_PPGTT,
+};
+
+typedef enum {
+	GTT_TYPE_INVALID = -1,
+
+	GTT_TYPE_GGTT_PTE,
+
+	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+	GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+	GTT_TYPE_PPGTT_PTE_ENTRY,
+
+	GTT_TYPE_PPGTT_PDE_ENTRY,
+	GTT_TYPE_PPGTT_PDP_ENTRY,
+	GTT_TYPE_PPGTT_PML4_ENTRY,
+
+	GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+	GTT_TYPE_PPGTT_ENTRY,
+
+	GTT_TYPE_PPGTT_PTE_PT,
+	GTT_TYPE_PPGTT_PDE_PT,
+	GTT_TYPE_PPGTT_PDP_PT,
+	GTT_TYPE_PPGTT_PML4_PT,
+
+	GTT_TYPE_MAX,
+} intel_gvt_gtt_type_t;
+
+struct intel_vgpu_mm {
+	int type;
+	bool initialized;
+	bool shadowed;
+
+	int page_table_entry_type;
+	u32 page_table_entry_size;
+	u32 page_table_entry_cnt;
+	void *virtual_page_table;
+	void *shadow_page_table;
+
+	int page_table_level;
+	bool has_shadow_page_table;
+	u32 pde_base_index;
+
+	struct list_head list;
+	struct kref ref;
+	atomic_t pincount;
+	struct list_head lru_list;
+	struct intel_vgpu *vgpu;
+};
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+		struct intel_vgpu_mm *mm,
+		void *page_table, struct intel_gvt_gtt_entry *e,
+		unsigned long index);
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+		struct intel_vgpu_mm *mm,
+		void *page_table, struct intel_gvt_gtt_entry *e,
+		unsigned long index);
+
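+/*
+ * Convenience wrappers: read/write GGTT entries and PPGTT root entries
+ * in either the guest page table or its shadow, via the common helpers.
+ */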
+#define ggtt_get_guest_entry(mm, e, index) \
+	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_set_guest_entry(mm, e, index) \
+	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_get_shadow_entry(mm, e, index) \
+	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ggtt_set_shadow_entry(mm, e, index) \
+	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_get_guest_root_entry(mm, e, index) \
+	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_set_guest_root_entry(mm, e, index) \
+	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_get_shadow_root_entry(mm, e, index) \
+	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_set_shadow_root_entry(mm, e, index) \
+	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
+extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+		int mm_type, void *virtual_page_table, int page_table_level,
+		u32 pde_base_index);
+extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+
+struct intel_vgpu_guest_page;
+
+struct intel_vgpu_scratch_pt {
+	struct page *page;
+	unsigned long page_mfn;
+};
+
+struct intel_vgpu_gtt {
+	struct intel_vgpu_mm *ggtt_mm;
+	unsigned long active_ppgtt_mm_bitmap;
+	struct list_head mm_list_head;
+	DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+	DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+	atomic_t n_write_protected_guest_page;
+	struct list_head oos_page_list_head;
+	struct list_head post_shadow_list_head;
+	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
+};
+
+extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+
+extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+
+extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+		int page_table_level, void *root_entry);
+
+struct intel_vgpu_oos_page;
+
+struct intel_vgpu_shadow_page {
+	void *vaddr;
+	struct page *page;
+	int type;
+	struct hlist_node node;
+	unsigned long mfn;
+};
+
+struct intel_vgpu_guest_page {
+	struct hlist_node node;
+	bool writeprotection;
+	unsigned long gfn;
+	int (*handler)(void *, u64, void *, int);
+	void *data;
+	unsigned long write_cnt;
+	struct intel_vgpu_oos_page *oos_page;
+};
+
+struct intel_vgpu_oos_page {
+	struct intel_vgpu_guest_page *guest_page;
+	struct list_head list;
+	struct list_head vm_list;
+	int id;
+	unsigned char mem[GTT_PAGE_SIZE];
+};
+
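+/* a 4KB gen8 page table holds 512 8-byte entries */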
+#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
+
+struct intel_vgpu_ppgtt_spt {
+	struct intel_vgpu_shadow_page shadow_page;
+	struct intel_vgpu_guest_page guest_page;
+	int guest_page_type;
+	atomic_t refcount;
+	struct intel_vgpu *vgpu;
+	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+	struct list_head post_shadow_list;
+};
+
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *guest_page,
+		unsigned long gfn,
+		int (*handler)(void *gp, u64, void *, int),
+		void *data);
+
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *guest_page);
+
+int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *guest_page);
+
+void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *guest_page);
+
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+		struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
+
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
+
+static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
+{
+	kref_get(&mm->ref);
+}
+
+static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
+{
+	kref_put(&mm->ref, intel_vgpu_destroy_mm);
+}
+
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
+
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
+
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
+		unsigned long gma);
+
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+		int page_table_level, void *root_entry);
+
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+		int page_table_level);
+
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+		int page_table_level);
+
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+	unsigned int off, void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+	unsigned int off, void *p_data, unsigned int bytes);
+
+#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 927f457..385969a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -19,12 +19,23 @@
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ *    Niu Bing <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
  */
 
 #include <linux/types.h>
 #include <xen/xen.h>
+#include <linux/kthread.h>
 
 #include "i915_drv.h"
+#include "gvt.h"
 
 struct intel_gvt_host intel_gvt_host;
 
@@ -33,6 +44,13 @@ static const char * const supported_hypervisors[] = {
 	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
 };
 
+struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
+	.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
+	.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
+	.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
+	.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
+};
+
 /**
  * intel_gvt_init_host - Load MPT modules and detect if we're running in host
  * @gvt: intel gvt device
@@ -47,6 +65,8 @@ static const char * const supported_hypervisors[] = {
  */
 int intel_gvt_init_host(void)
 {
+	int ret;
+
 	if (intel_gvt_host.initialized)
 		return 0;
 
@@ -72,7 +92,8 @@ int intel_gvt_init_host(void)
 		return -EINVAL;
 
 	/* Try to detect if we're running in host instead of VM. */
-	if (!intel_gvt_hypervisor_detect_host())
+	ret = intel_gvt_hypervisor_detect_host();
+	if (ret)
 		return -ENODEV;
 
 	gvt_dbg_core("Running with hypervisor %s in host mode\n",
@@ -84,9 +105,67 @@ int intel_gvt_init_host(void)
 
 static void init_device_info(struct intel_gvt *gvt)
 {
-	if (IS_BROADWELL(gvt->dev_priv))
-		gvt->device_info.max_support_vgpus = 8;
-	/* This function will grow large in GVT device model patches. */
+	struct intel_gvt_device_info *info = &gvt->device_info;
+	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+		info->max_support_vgpus = 8;
+		info->cfg_space_size = 256;
+		info->mmio_size = 2 * 1024 * 1024;
+		info->mmio_bar = 0;
+		info->gtt_start_offset = 8 * 1024 * 1024;
+		info->gtt_entry_size = 8;
+		info->gtt_entry_size_shift = 3;
+		info->gmadr_bytes_in_cmd = 8;
+		info->max_surface_size = 36 * 1024 * 1024;
+	}
+	info->msi_cap_offset = pdev->msi_cap;
+}
+
+static int gvt_service_thread(void *data)
+{
+	struct intel_gvt *gvt = (struct intel_gvt *)data;
+	int ret;
+
+	gvt_dbg_core("service thread start\n");
+
+	while (!kthread_should_stop()) {
+		ret = wait_event_interruptible(gvt->service_thread_wq,
+				kthread_should_stop() || gvt->service_request);
+
+		if (kthread_should_stop())
+			break;
+
+		if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
+			continue;
+
+		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
+					(void *)&gvt->service_request)) {
+			mutex_lock(&gvt->lock);
+			intel_gvt_emulate_vblank(gvt);
+			mutex_unlock(&gvt->lock);
+		}
+	}
+
+	return 0;
+}
+
+static void clean_service_thread(struct intel_gvt *gvt)
+{
+	kthread_stop(gvt->service_thread);
+}
+
+static int init_service_thread(struct intel_gvt *gvt)
+{
+	init_waitqueue_head(&gvt->service_thread_wq);
+
+	gvt->service_thread = kthread_run(gvt_service_thread,
+			gvt, "gvt_service_thread");
+	if (IS_ERR(gvt->service_thread)) {
+		gvt_err("failed to start service thread.\n");
+		return PTR_ERR(gvt->service_thread);
+	}
+	return 0;
 }
 
 /**
@@ -99,14 +178,23 @@ static void init_device_info(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 {
-	struct intel_gvt *gvt = &dev_priv->gvt;
+	struct intel_gvt *gvt = to_gvt(dev_priv);
 
-	if (WARN_ON(!gvt->initialized))
+	if (WARN_ON(!gvt))
 		return;
 
-	/* Other de-initialization of GVT components will be introduced. */
+	clean_service_thread(gvt);
+	intel_gvt_clean_cmd_parser(gvt);
+	intel_gvt_clean_sched_policy(gvt);
+	intel_gvt_clean_workload_scheduler(gvt);
+	intel_gvt_clean_opregion(gvt);
+	intel_gvt_clean_gtt(gvt);
+	intel_gvt_clean_irq(gvt);
+	intel_gvt_clean_mmio_info(gvt);
+	intel_gvt_free_firmware(gvt);
 
-	gvt->initialized = false;
+	kfree(dev_priv->gvt);
+	dev_priv->gvt = NULL;
 }
 
 /**
@@ -122,7 +210,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
  */
 int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 {
-	struct intel_gvt *gvt = &dev_priv->gvt;
+	struct intel_gvt *gvt;
+	int ret;
+
 	/*
 	 * Cannot initialize GVT device without intel_gvt_host gets
 	 * initialized first.
@@ -130,16 +220,76 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	if (WARN_ON(!intel_gvt_host.initialized))
 		return -EINVAL;
 
-	if (WARN_ON(gvt->initialized))
+	if (WARN_ON(dev_priv->gvt))
 		return -EEXIST;
 
+	gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
+	if (!gvt)
+		return -ENOMEM;
+
 	gvt_dbg_core("init gvt device\n");
 
+	mutex_init(&gvt->lock);
+	gvt->dev_priv = dev_priv;
+
 	init_device_info(gvt);
-	/*
-	 * Other initialization of GVT components will be introduce here.
-	 */
+
+	ret = intel_gvt_setup_mmio_info(gvt);
+	if (ret)
+		return ret;
+
+	ret = intel_gvt_load_firmware(gvt);
+	if (ret)
+		goto out_clean_mmio_info;
+
+	ret = intel_gvt_init_irq(gvt);
+	if (ret)
+		goto out_free_firmware;
+
+	ret = intel_gvt_init_gtt(gvt);
+	if (ret)
+		goto out_clean_irq;
+
+	ret = intel_gvt_init_opregion(gvt);
+	if (ret)
+		goto out_clean_gtt;
+
+	ret = intel_gvt_init_workload_scheduler(gvt);
+	if (ret)
+		goto out_clean_opregion;
+
+	ret = intel_gvt_init_sched_policy(gvt);
+	if (ret)
+		goto out_clean_workload_scheduler;
+
+	ret = intel_gvt_init_cmd_parser(gvt);
+	if (ret)
+		goto out_clean_sched_policy;
+
+	ret = init_service_thread(gvt);
+	if (ret)
+		goto out_clean_cmd_parser;
+
 	gvt_dbg_core("gvt device creation is done\n");
-	gvt->initialized = true;
+	dev_priv->gvt = gvt;
 	return 0;
+
+out_clean_cmd_parser:
+	intel_gvt_clean_cmd_parser(gvt);
+out_clean_sched_policy:
+	intel_gvt_clean_sched_policy(gvt);
+out_clean_workload_scheduler:
+	intel_gvt_clean_workload_scheduler(gvt);
+out_clean_opregion:
+	intel_gvt_clean_opregion(gvt);
+out_clean_gtt:
+	intel_gvt_clean_gtt(gvt);
+out_clean_irq:
+	intel_gvt_clean_irq(gvt);
+out_free_firmware:
+	intel_gvt_free_firmware(gvt);
+out_clean_mmio_info:
+	intel_gvt_clean_mmio_info(gvt);
+	kfree(gvt);
+	return ret;
 }
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index fb619a6..62fc9e3 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -19,6 +19,15 @@
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ *    Niu Bing <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
  */
 
 #ifndef _GVT_H_
@@ -26,6 +35,17 @@
 
 #include "debug.h"
 #include "hypercall.h"
+#include "mmio.h"
+#include "reg.h"
+#include "interrupt.h"
+#include "gtt.h"
+#include "display.h"
+#include "edid.h"
+#include "execlist.h"
+#include "scheduler.h"
+#include "sched_policy.h"
+#include "render.h"
+#include "cmd_parser.h"
 
 #define GVT_MAX_VGPU 8
 
@@ -45,25 +65,326 @@ extern struct intel_gvt_host intel_gvt_host;
 /* Describe per-platform limitations. */
 struct intel_gvt_device_info {
 	u32 max_support_vgpus;
-	/* This data structure will grow bigger in GVT device model patches */
+	u32 cfg_space_size;
+	u32 mmio_size;
+	u32 mmio_bar;
+	unsigned long msi_cap_offset;
+	u32 gtt_start_offset;
+	u32 gtt_entry_size;
+	u32 gtt_entry_size_shift;
+	int gmadr_bytes_in_cmd;
+	u32 max_surface_size;
+};
+
+/* GM resources owned by a vGPU */
+struct intel_vgpu_gm {
+	u64 aperture_sz;
+	u64 hidden_sz;
+	struct drm_mm_node low_gm_node;
+	struct drm_mm_node high_gm_node;
+};
+
+#define INTEL_GVT_MAX_NUM_FENCES 32
+
+/* Fences owned by a vGPU */
+struct intel_vgpu_fence {
+	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+	u32 base;
+	u32 size;
+};
+
+struct intel_vgpu_mmio {
+	void *vreg;
+	void *sreg;
+	bool disable_warn_untrack;
+};
+
+#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
+#define INTEL_GVT_MAX_BAR_NUM 4
+
+struct intel_vgpu_pci_bar {
+	u64 size;
+	bool tracked;
+};
+
+struct intel_vgpu_cfg_space {
+	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
+	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+};
+
+#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
+
+#define INTEL_GVT_MAX_PIPE 4
+
+struct intel_vgpu_irq {
+	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
+	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+		       INTEL_GVT_EVENT_MAX);
+};
+
+struct intel_vgpu_opregion {
+	void *va;
+	u32 gfn[INTEL_GVT_OPREGION_PAGES];
+	struct page *pages[INTEL_GVT_OPREGION_PAGES];
+};
+
+#define vgpu_opregion(vgpu) (&(vgpu->opregion))
+
+#define INTEL_GVT_MAX_PORT 5
+
+struct intel_vgpu_display {
+	struct intel_vgpu_i2c_edid i2c_edid;
+	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+	struct intel_vgpu_sbi sbi;
 };
 
 struct intel_vgpu {
 	struct intel_gvt *gvt;
 	int id;
 	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+	bool active;
+	bool resetting;
+	void *sched_data;
+
+	struct intel_vgpu_fence fence;
+	struct intel_vgpu_gm gm;
+	struct intel_vgpu_cfg_space cfg_space;
+	struct intel_vgpu_mmio mmio;
+	struct intel_vgpu_irq irq;
+	struct intel_vgpu_gtt gtt;
+	struct intel_vgpu_opregion opregion;
+	struct intel_vgpu_display display;
+	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+	struct list_head workload_q_head[I915_NUM_ENGINES];
+	struct kmem_cache *workloads;
+	atomic_t running_workload_num;
+	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+	struct i915_gem_context *shadow_ctx;
+	struct notifier_block shadow_ctx_notifier_block;
+};
+
+struct intel_gvt_gm {
+	unsigned long vgpu_allocated_low_gm_size;
+	unsigned long vgpu_allocated_high_gm_size;
+};
+
+struct intel_gvt_fence {
+	unsigned long vgpu_allocated_fence_num;
+};
+
+#define INTEL_GVT_MMIO_HASH_BITS 9
+
+struct intel_gvt_mmio {
+	u32 *mmio_attribute;
+	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
+};
+
+struct intel_gvt_firmware {
+	void *cfg_space;
+	void *mmio;
+	bool firmware_loaded;
+};
+
+struct intel_gvt_opregion {
+	void __iomem *opregion_va;
+	u32 opregion_pa;
 };
 
 struct intel_gvt {
 	struct mutex lock;
-	bool initialized;
-
 	struct drm_i915_private *dev_priv;
 	struct idr vgpu_idr;	/* vGPU IDR pool */
 
 	struct intel_gvt_device_info device_info;
+	struct intel_gvt_gm gm;
+	struct intel_gvt_fence fence;
+	struct intel_gvt_mmio mmio;
+	struct intel_gvt_firmware firmware;
+	struct intel_gvt_irq irq;
+	struct intel_gvt_gtt gtt;
+	struct intel_gvt_opregion opregion;
+	struct intel_gvt_workload_scheduler scheduler;
+	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+
+	struct task_struct *service_thread;
+	wait_queue_head_t service_thread_wq;
+	unsigned long service_request;
 };
 
+static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
+{
+	return i915->gvt;
+}
+
+enum {
+	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+};
+
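+/* post a service request bit and wake the GVT service thread */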
+static inline void intel_gvt_request_service(struct intel_gvt *gvt,
+		int service)
+{
+	set_bit(service, (void *)&gvt->service_request);
+	wake_up(&gvt->service_thread_wq);
+}
+
+void intel_gvt_free_firmware(struct intel_gvt *gvt);
+int intel_gvt_load_firmware(struct intel_gvt *gvt);
+
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+
+#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
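+/* size of the GGTT page table in bytes: one 8-byte PTE per 4KB GM page */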
+#define gvt_ggtt_sz(gvt) \
+	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+
+#define gvt_aperture_gmadr_base(gvt) (0)
+#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
+				     + gvt_aperture_sz(gvt) - 1)
+
+#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
+				    + gvt_aperture_sz(gvt))
+#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+				   + gvt_hidden_sz(gvt) - 1)
+
+#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+
+/* Aperture/GM space definitions for vGPU */
+#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
+#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
+#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
+#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_base(vgpu) \
+	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
+
+#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_end(vgpu) \
+	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
+#define vgpu_aperture_gmadr_end(vgpu) \
+	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
+#define vgpu_hidden_gmadr_end(vgpu) \
+	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
+
+#define vgpu_fence_base(vgpu) (vgpu->fence.base)
+#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+
+struct intel_vgpu_creation_params {
+	__u64 handle;
+	__u64 low_gm_sz;  /* in MB */
+	__u64 high_gm_sz; /* in MB */
+	__u64 fence_sz;
+	__s32 primary;
+	__u64 vgpu_id;
+};
+
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+			      struct intel_vgpu_creation_params *param);
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+	u32 fence, u64 value);
+
+/* Macros for easily accessing vGPU virtual/shadow register */
+#define vgpu_vreg(vgpu, reg) \
+	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg8(vgpu, reg) \
+	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg16(vgpu, reg) \
+	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg64(vgpu, reg) \
+	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg(vgpu, reg) \
+	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg8(vgpu, reg) \
+	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg16(vgpu, reg) \
+	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg64(vgpu, reg) \
+	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+
+#define for_each_active_vgpu(gvt, vgpu, id) \
+	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
+		for_each_if(vgpu->active)
+
+static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
+					    u32 offset, u32 val, bool low)
+{
+	u32 *pval;
+
+	/* BAR offset should be 32-bit aligned */
+	offset = rounddown(offset, 4);
+	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+
+	if (low) {
+		/*
+		 * only update bits 31-4,
+		 * leaving bits 3-0 unchanged.
+		 */
+		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
+	}
+}
+
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+					 struct intel_vgpu_creation_params *
+					 param);
+
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+
+/* validating GM functions */
+#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
+	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
+	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
+	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
+	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_valid(vgpu, gmadr) \
+	 ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
+	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))
+
+#define gvt_gmadr_is_aperture(gvt, gmadr) \
+	 ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
+	  (gmadr <= gvt_aperture_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_hidden(gvt, gmadr) \
+	  ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
+	   (gmadr <= gvt_hidden_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_valid(gvt, gmadr) \
+	  (gvt_gmadr_is_aperture(gvt, gmadr) || \
+	    gvt_gmadr_is_hidden(gvt, gmadr))
+
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+			     unsigned long *h_index);
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+			     unsigned long *g_index);
+
+int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes);
+
+void intel_gvt_clean_opregion(struct intel_gvt *gvt);
+int intel_gvt_init_opregion(struct intel_gvt *gvt);
+
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+int setup_vgpu_mmio(struct intel_vgpu *vgpu);
+void populate_pvinfo_page(struct intel_vgpu *vgpu);
+
 #include "mpt.h"
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
new file mode 100644
index 0000000..1b3db0c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -0,0 +1,2847 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Eddie Dong <eddie.dong@intel.com>
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Pei Zhang <pei.zhang@intel.com>
+ *    Niu Bing <bing.niu@intel.com>
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+/* XXX FIXME i915 has changed PP_XXX definition */
+#define PCH_PP_STATUS  _MMIO(0xc7200)
+#define PCH_PP_CONTROL _MMIO(0xc7204)
+#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
+#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
+#define PCH_PP_DIVISOR _MMIO(0xc7210)
+
+/* Register contains RO bits */
+#define F_RO		(1 << 0)
+/* Register contains graphics address */
+#define F_GMADR		(1 << 1)
+/* Mode mask registers with high 16 bits as the mask bits */
+#define F_MODE_MASK	(1 << 2)
+/* This reg can be accessed by GPU commands */
+#define F_CMD_ACCESS	(1 << 3)
+/* This reg has been accessed by a VM */
+#define F_ACCESSED	(1 << 4)
+/* This reg has been accessed through GPU commands */
+#define F_CMD_ACCESSED	(1 << 5)
+/* This reg could be accessed by unaligned address */
+#define F_UNALIGN	(1 << 6)
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
+{
+	if (IS_BROADWELL(gvt->dev_priv))
+		return D_BDW;
+	else if (IS_SKYLAKE(gvt->dev_priv))
+		return D_SKL;
+
+	return 0;
+}
+
+bool intel_gvt_match_device(struct intel_gvt *gvt,
+		unsigned long device)
+{
+	return intel_gvt_get_device_type(gvt) & device;
+}
+
+static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+}
+
+static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+}
+
+static int new_mmio_info(struct intel_gvt *gvt,
+		u32 offset, u32 flags, u32 size,
+		u32 addr_mask, u32 ro_mask, u32 device,
+		void *read, void *write)
+{
+	struct intel_gvt_mmio_info *info, *p;
+	u32 start, end, i;
+
+	if (!intel_gvt_match_device(gvt, device))
+		return 0;
+
+	if (WARN_ON(!IS_ALIGNED(offset, 4)))
+		return -EINVAL;
+
+	start = offset;
+	end = offset + size;
+
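+	/* register one mmio_info node per 4-byte register in the range */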
+	for (i = start; i < end; i += 4) {
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
+		if (!info)
+			return -ENOMEM;
+
+		info->offset = i;
+		p = intel_gvt_find_mmio_info(gvt, info->offset);
+		if (p)
+			gvt_err("dup mmio definition offset %x\n",
+				info->offset);
+		info->size = size;
+		info->length = (i + 4) < end ? 4 : (end - i);
+		info->addr_mask = addr_mask;
+		info->device = device;
+		info->read = read ? read : intel_vgpu_default_mmio_read;
+		info->write = write ? write : intel_vgpu_default_mmio_write;
+		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
+		INIT_HLIST_NODE(&info->node);
+		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+	}
+	return 0;
+}
+
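+/*
+ * Map a render MMIO offset to its engine: mask off the low 12 bits and
+ * match the result against each engine's mmio_base; returns -1 if none.
+ */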
+static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+{
+	enum intel_engine_id id;
+	struct intel_engine_cs *engine;
+
+	reg &= ~GENMASK(11, 0);
+	for_each_engine(engine, gvt->dev_priv, id) {
+		if (engine->mmio_base == reg)
+			return id;
+	}
+	return -1;
+}
+
+#define offset_to_fence_num(offset) \
+	(((offset) - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
+
+#define fence_num_to_offset(num) \
+	((num) * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
+
+static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
+		unsigned int fence_num, void *p_data, unsigned int bytes)
+{
+	if (fence_num >= vgpu_fence_sz(vgpu)) {
+		gvt_err("vgpu%d: found oob fence register access\n",
+				vgpu->id);
+		gvt_err("vgpu%d: total fence num %d access fence num %d\n",
+				vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+		memset(p_data, 0, bytes);
+	}
+	return 0;
+}
+
+static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+		void *p_data, unsigned int bytes)
+{
+	int ret;
+
+	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
+			p_data, bytes);
+	if (ret)
+		return ret;
+	read_vreg(vgpu, off, p_data, bytes);
+	return 0;
+}
+
+static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+		void *p_data, unsigned int bytes)
+{
+	unsigned int fence_num = offset_to_fence_num(off);
+	int ret;
+
+	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
+	if (ret)
+		return ret;
+	write_vreg(vgpu, off, p_data, bytes);
+
+	intel_vgpu_write_fence(vgpu, fence_num,
+			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
+	return 0;
+}
+
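+/*
+ * Masked-write register update: the high 16 bits of the new value select
+ * which of the low 16 bits get written; unselected bits keep their old
+ * value.
+ */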
+#define CALC_MODE_MASK_REG(old, new) \
+	(((new) & GENMASK(31, 16)) \
+	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
+	 | ((new) & ((new) >> 16))))
+
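+/*
+ * Forcewake registers are masked registers; mirror the requested domains
+ * into the matching ack register so the guest never waits on a handshake.
+ */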
+static int mul_force_wake_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 old, new;
+	u32 ack_reg_offset;
+
+	old = vgpu_vreg(vgpu, offset);
+	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
+
+	if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+		switch (offset) {
+		case FORCEWAKE_RENDER_GEN9_REG:
+			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
+			break;
+		case FORCEWAKE_BLITTER_GEN9_REG:
+			ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
+			break;
+		case FORCEWAKE_MEDIA_GEN9_REG:
+			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
+			break;
+		default:
+			/* should not reach here */
+			gvt_err("invalid forcewake offset 0x%x\n", offset);
+			return 1;
+		}
+	} else {
+		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
+	}
+
+	vgpu_vreg(vgpu, offset) = new;
+	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
+	return 0;
+}
+
+static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes, unsigned long bitmap)
+{
+	struct intel_gvt_workload_scheduler *scheduler =
+		&vgpu->gvt->scheduler;
+
+	vgpu->resetting = true;
+
+	intel_vgpu_stop_schedule(vgpu);
+	/*
+	 * The current_vgpu will set to NULL after stopping the
+	 * scheduler when the reset is triggered by current vgpu.
+	 */
+	if (scheduler->current_vgpu == NULL) {
+		mutex_unlock(&vgpu->gvt->lock);
+		intel_gvt_wait_vgpu_idle(vgpu);
+		mutex_lock(&vgpu->gvt->lock);
+	}
+
+	intel_vgpu_reset_execlist(vgpu, bitmap);
+
+	/* full GPU reset */
+	if (bitmap == 0xff) {
+		mutex_unlock(&vgpu->gvt->lock);
+		intel_vgpu_clean_gtt(vgpu);
+		mutex_lock(&vgpu->gvt->lock);
+		setup_vgpu_mmio(vgpu);
+		populate_pvinfo_page(vgpu);
+		intel_vgpu_init_gtt(vgpu);
+	}
+
+	vgpu->resetting = false;
+
+	return 0;
+}
+
+static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 data;
+	u64 bitmap = 0;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	if (data & GEN6_GRDOM_FULL) {
+		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
+		bitmap = 0xff;
+	}
+	if (data & GEN6_GRDOM_RENDER) {
+		gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+		bitmap |= (1 << RCS);
+	}
+	if (data & GEN6_GRDOM_MEDIA) {
+		gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+		bitmap |= (1 << VCS);
+	}
+	if (data & GEN6_GRDOM_BLT) {
+		gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+		bitmap |= (1 << BCS);
+	}
+	if (data & GEN6_GRDOM_VECS) {
+		gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+		bitmap |= (1 << VECS);
+	}
+	if (data & GEN8_GRDOM_MEDIA2) {
+		gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+		if (HAS_BSD2(vgpu->gvt->dev_priv))
+			bitmap |= (1 << VCS2);
+	}
+	return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+}
+
+static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
+}
+
+static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
+}
+
+static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
+		vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
+		vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+		vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+		vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+	} else {
+		vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+			~(PP_ON | PP_SEQUENCE_POWER_DOWN
+					| PP_CYCLE_DELAY_ACTIVE);
+	}
+	return 0;
+}
+
+static int transconf_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
+		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
+	else
+		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
+	return 0;
+}
+
+static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
+		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
+	else
+		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
+
+	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
+		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
+	else
+		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
+
+	return 0;
+}
+
+static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	*(u32 *)p_data = (1 << 17);
+	return 0;
+}
+
+static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	*(u32 *)p_data = 3;
+	return 0;
+}
+
+static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	*(u32 *)p_data = (0x2f << 16);
+	return 0;
+}
+
+static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 data;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	if (data & PIPECONF_ENABLE)
+		vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
+	else
+		vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+	intel_gvt_check_vblank_emulation(vgpu->gvt);
+	return 0;
+}
+
+static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
+		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
+	} else {
+		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
+		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
+			vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
+	}
+	return 0;
+}
+
+static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
+	return 0;
+}
+
+#define FDI_LINK_TRAIN_PATTERN1         0
+#define FDI_LINK_TRAIN_PATTERN2         1
+
+static int fdi_auto_training_started(struct intel_vgpu *vgpu)
+{
+	u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
+	u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+
+	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
+			(rx_ctl & FDI_RX_ENABLE) &&
+			(rx_ctl & FDI_AUTO_TRAINING) &&
+			(tx_ctl & DP_TP_CTL_ENABLE) &&
+			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
+		return 1;
+	else
+		return 0;
+}
+
+static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
+		enum pipe pipe, unsigned int train_pattern)
+{
+	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
+	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
+	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
+	unsigned int fdi_iir_check_bits;
+
+	fdi_rx_imr = FDI_RX_IMR(pipe);
+	fdi_tx_ctl = FDI_TX_CTL(pipe);
+	fdi_rx_ctl = FDI_RX_CTL(pipe);
+
+	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
+		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
+		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
+		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
+	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
+		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
+		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
+		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
+	} else {
+		gvt_err("Invalid train pattern %d\n", train_pattern);
+		return -EINVAL;
+	}
+
+	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
+	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
+
+	/* if the IIR check bit is masked in the IMR, report no progress */
+	if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+		return 0;
+
+	if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+			== fdi_tx_check_bits)
+		&& ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+			== fdi_rx_check_bits))
+		return 1;
+	else
+		return 0;
+}
+
+#define INVALID_INDEX (~0U)
+
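+/*
+ * Derive the instance index (pipe/port) of a register from its offset,
+ * given the offsets of the first two instances and the last valid one.
+ */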
+static unsigned int calc_index(unsigned int offset, unsigned int start,
+	unsigned int next, unsigned int end, i915_reg_t i915_end)
+{
+	unsigned int range = next - start;
+
+	if (!end)
+		end = i915_mmio_reg_offset(i915_end);
+	if (offset < start || offset > end)
+		return INVALID_INDEX;
+	offset -= start;
+	return offset / range;
+}
+
+#define FDI_RX_CTL_TO_PIPE(offset) \
+	calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
+
+#define FDI_TX_CTL_TO_PIPE(offset) \
+	calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
+
+#define FDI_RX_IMR_TO_PIPE(offset) \
+	calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
+
+static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	i915_reg_t fdi_rx_iir;
+	unsigned int index;
+	int ret;
+
+	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+		index = FDI_RX_CTL_TO_PIPE(offset);
+	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+		index = FDI_TX_CTL_TO_PIPE(offset);
+	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
+		index = FDI_RX_IMR_TO_PIPE(offset);
+	else {
+		gvt_err("Unsupported register %x\n", offset);
+		return -EINVAL;
+	}
+
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	fdi_rx_iir = FDI_RX_IIR(index);
+
+	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
+	if (ret < 0)
+		return ret;
+	if (ret)
+		vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+
+	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
+	if (ret < 0)
+		return ret;
+	if (ret)
+		vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+
+	if (offset == _FDI_RXA_CTL)
+		if (fdi_auto_training_started(vgpu))
+			vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+				DP_TP_STATUS_AUTOTRAIN_DONE;
+	return 0;
+}
+
+#define DP_TP_CTL_TO_PORT(offset) \
+	calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
+
+static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	i915_reg_t status_reg;
+	unsigned int index;
+	u32 data;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	index = DP_TP_CTL_TO_PORT(offset);
+	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
+	if (data == 0x2) {
+		status_reg = DP_TP_STATUS(index);
+		vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+	}
+	return 0;
+}
+
+static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 reg_val;
+	u32 sticky_mask;
+
+	reg_val = *((u32 *)p_data);
+	sticky_mask = GENMASK(27, 26) | (1 << 24);
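+	/*
+	 * The sticky bits are write-1-to-clear: preserve their current
+	 * value first, then clear whichever of them the guest wrote as 1.
+	 */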
+
+	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
+		(vgpu_vreg(vgpu, offset) & sticky_mask);
+	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
+	return 0;
+}
+
+static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 data;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
+		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+	return 0;
+}
+
+static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 data;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
+		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
+	else
+		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
+	return 0;
+}
+
+#define DSPSURF_TO_PIPE(offset) \
+	calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
+
+static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	unsigned int index = DSPSURF_TO_PIPE(offset);
+	i915_reg_t surflive_reg = DSPSURFLIVE(index);
+	int flip_event[] = {
+		[PIPE_A] = PRIMARY_A_FLIP_DONE,
+		[PIPE_B] = PRIMARY_B_FLIP_DONE,
+		[PIPE_C] = PRIMARY_C_FLIP_DONE,
+	};
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+	return 0;
+}
+
+#define SPRSURF_TO_PIPE(offset) \
+	calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
+
+static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	unsigned int index = SPRSURF_TO_PIPE(offset);
+	i915_reg_t surflive_reg = SPRSURFLIVE(index);
+	int flip_event[] = {
+		[PIPE_A] = SPRITE_A_FLIP_DONE,
+		[PIPE_B] = SPRITE_B_FLIP_DONE,
+		[PIPE_C] = SPRITE_C_FLIP_DONE,
+	};
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+	return 0;
+}
+
+static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
+		unsigned int reg)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	enum intel_gvt_event_type event;
+
+	if (reg == _DPA_AUX_CH_CTL)
+		event = AUX_CHANNEL_A;
+	else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+		event = AUX_CHANNEL_B;
+	else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+		event = AUX_CHANNEL_C;
+	else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+		event = AUX_CHANNEL_D;
+	else {
+		WARN_ON(true);
+		return -EINVAL;
+	}
+
+	intel_vgpu_trigger_virtual_event(vgpu, event);
+	return 0;
+}
+
+static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
+		unsigned int reg, int len, bool data_valid)
+{
+	/* mark transaction done */
+	value |= DP_AUX_CH_CTL_DONE;
+	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
+	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
+
+	if (data_valid)
+		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
+	else
+		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
+
+	/* message size */
+	value &= ~(0xf << 20);
+	value |= (len << 20);
+	vgpu_vreg(vgpu, reg) = value;
+
+	if (value & DP_AUX_CH_CTL_INTERRUPT)
+		return trigger_aux_channel_interrupt(vgpu, reg);
+	return 0;
+}
+
+static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
+		uint8_t t)
+		u8 t)
+	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
+		/* training pattern 1 for CR */
+		/* set LANE0_CR_DONE, LANE1_CR_DONE */
+		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
+		/* set LANE2_CR_DONE, LANE3_CR_DONE */
+		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
+	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+			DPCD_TRAINING_PATTERN_2) {
+		/* training pattern 2 for EQ */
+		/* Set CHANNEL_EQ_DONE and  SYMBOL_LOCKED for Lane0_1 */
+		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
+		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
+		/* Set CHANNEL_EQ_DONE and  SYMBOL_LOCKED for Lane2_3 */
+		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
+		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
+		/* set INTERLANE_ALIGN_DONE */
+		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
+			DPCD_INTERLANE_ALIGN_DONE;
+	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+			DPCD_LINK_TRAINING_DISABLED) {
+		/* finish link training */
+		/* set sink status as synchronized */
+		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
+	}
+}
+
+#define _REG_HSW_DP_AUX_CH_CTL(dp) \
+	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
+
+#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
+
+#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
+
+#define dpy_is_valid_port(port)	\
+		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
+
+static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	struct intel_vgpu_display *display = &vgpu->display;
+	int msg, addr, ctrl, op, len;
+	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
+	struct intel_vgpu_dpcd_data *dpcd = NULL;
+	struct intel_vgpu_port *port = NULL;
+	u32 data;
+
+	if (!dpy_is_valid_port(port_index)) {
+		gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+		return 0;
+	}
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
+	    offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+		/* SKL DPB/C/D aux ctl register changed */
+		return 0;
+	} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
+		/* write to the data registers */
+		return 0;
+	}
+
+	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
+		/* just want to clear the sticky bits */
+		vgpu_vreg(vgpu, offset) = 0;
+		return 0;
+	}
+
+	port = &display->ports[port_index];
+	dpcd = port->dpcd;
+
+	/* read out message from DATA1 register */
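+	/* header layout: [31:24] command, [23:8] DPCD address, [7:0] len - 1 */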
+	msg = vgpu_vreg(vgpu, offset + 4);
+	addr = (msg >> 8) & 0xffff;
+	ctrl = (msg >> 24) & 0xff;
+	len = msg & 0xff;
+	op = ctrl >> 4;
+
+	if (op == GVT_AUX_NATIVE_WRITE) {
+		int t;
+		u8 buf[16];
+
+		if ((addr + len + 1) >= DPCD_SIZE) {
+			/*
+			 * Write request exceeds what we support.
+			 * DPCD spec: When a Source Device is writing a DPCD
+			 * address not supported by the Sink Device, the Sink
+			 * Device shall reply with AUX NACK and "M" equal to
+			 * zero.
+			 */
+
+			/* NAK the write */
+			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
+			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
+			return 0;
+		}
+
+		/*
+		 * Write request format: (command + address) occupies
+		 * 3 bytes, followed by (len + 1) bytes of data.
+		 */
+		if (WARN_ON((len + 4) > AUX_BURST_SIZE))
+			return -EINVAL;
+
+		/* unpack data from vreg to buf */
+		for (t = 0; t < 4; t++) {
+			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
+
+			buf[t * 4] = (r >> 24) & 0xff;
+			buf[t * 4 + 1] = (r >> 16) & 0xff;
+			buf[t * 4 + 2] = (r >> 8) & 0xff;
+			buf[t * 4 + 3] = r & 0xff;
+		}
+
+		/* write to virtual DPCD */
+		if (dpcd && dpcd->data_valid) {
+			for (t = 0; t <= len; t++) {
+				int p = addr + t;
+
+				dpcd->data[p] = buf[t];
+				/* check for link training */
+				if (p == DPCD_TRAINING_PATTERN_SET)
+					dp_aux_ch_ctl_link_training(dpcd,
+							buf[t]);
+			}
+		}
+
+		/* ACK the write */
+		vgpu_vreg(vgpu, offset + 4) = 0;
+		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
+				dpcd && dpcd->data_valid);
+		return 0;
+	}
+
+	if (op == GVT_AUX_NATIVE_READ) {
+		int idx, i, ret = 0;
+
+		if ((addr + len + 1) >= DPCD_SIZE) {
+			/*
+			 * Read request exceeds what we support.
+			 * DPCD spec: A Sink Device receiving a Native AUX CH
+			 * read request for an unsupported DPCD address must
+			 * reply with an AUX ACK and read data set equal to
+			 * zero instead of replying with AUX NACK.
+			 */
+
+			/* ACK the read */
+			vgpu_vreg(vgpu, offset + 4) = 0;
+			vgpu_vreg(vgpu, offset + 8) = 0;
+			vgpu_vreg(vgpu, offset + 12) = 0;
+			vgpu_vreg(vgpu, offset + 16) = 0;
+			vgpu_vreg(vgpu, offset + 20) = 0;
+
+			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+					true);
+			return 0;
+		}
+
+		for (idx = 1; idx <= 5; idx++) {
+			/* clear the data registers */
+			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
+		}
+
+		/*
+		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
+		 */
+		if (WARN_ON((len + 2) > AUX_BURST_SIZE))
+			return -EINVAL;
+
+		/* read from virtual DPCD to vreg */
+		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
+		if (dpcd && dpcd->data_valid) {
+			for (i = 1; i <= (len + 1); i++) {
+				int t;
+
+				t = dpcd->data[addr + i - 1];
+				t <<= (24 - 8 * (i % 4));
+				ret |= t;
+
+				if ((i % 4 == 3) || (i == (len + 1))) {
+					vgpu_vreg(vgpu, offset +
+							(i / 4 + 1) * 4) = ret;
+					ret = 0;
+				}
+			}
+		}
+		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+				dpcd && dpcd->data_valid);
+		return 0;
+	}
+
+	/* i2c transaction starts */
+	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
+
+	if (data & DP_AUX_CH_CTL_INTERRUPT)
+		trigger_aux_channel_interrupt(vgpu, offset);
+	return 0;
+}
+
+static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	bool vga_disable;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
+
+	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
+			vga_disable ? "Disable" : "Enable");
+	return 0;
+}
+
+static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
+		unsigned int sbi_offset)
+{
+	struct intel_vgpu_display *display = &vgpu->display;
+	int num = display->sbi.number;
+	int i;
+
+	for (i = 0; i < num; ++i)
+		if (display->sbi.registers[i].offset == sbi_offset)
+			break;
+
+	if (i == num)
+		return 0;
+
+	return display->sbi.registers[i].value;
+}
+
+static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
+		unsigned int offset, u32 value)
+{
+	struct intel_vgpu_display *display = &vgpu->display;
+	int num = display->sbi.number;
+	int i;
+
+	for (i = 0; i < num; ++i) {
+		if (display->sbi.registers[i].offset == offset)
+			break;
+	}
+
+	if (i == num) {
+		if (num == SBI_REG_MAX) {
+			gvt_err("vgpu%d: SBI cache has reached its maximum limit\n",
+					vgpu->id);
+			return;
+		}
+		display->sbi.number++;
+	}
+
+	display->sbi.registers[i].offset = offset;
+	display->sbi.registers[i].value = value;
+}
+
+static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+				SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
+		unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
+				sbi_offset);
+	}
+	read_vreg(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 data;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
+	data |= SBI_READY;
+
+	data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+	data |= SBI_RESPONSE_SUCCESS;
+
+	vgpu_vreg(vgpu, offset) = data;
+
+	if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+				SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
+		unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+
+		write_virtual_sbi_register(vgpu, sbi_offset,
+				vgpu_vreg(vgpu, SBI_DATA));
+	}
+	return 0;
+}
+
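+/* offset of a struct vgt_if field within the PVINFO MMIO page */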
+#define _vgtif_reg(x) \
+	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
+
+static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	bool invalid_read = false;
+
+	read_vreg(vgpu, offset, p_data, bytes);
+
+	switch (offset) {
+	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
+		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
+			invalid_read = true;
+		break;
+	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
+		_vgtif_reg(avail_rs.fence_num):
+		if (offset + bytes >
+			_vgtif_reg(avail_rs.fence_num) + 4)
+			invalid_read = true;
+		break;
+	case 0x78010:	/* vgt_caps */
+	case 0x7881c:
+		break;
+	default:
+		invalid_read = true;
+		break;
+	}
+	if (invalid_read)
+		gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+				offset, bytes, *(u32 *)p_data);
+	return 0;
+}
+
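+/* dispatch guest-to-GVT notifications written through the PVINFO page */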
+static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
+{
+	int ret = 0;
+
+	switch (notification) {
+	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
+		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
+		break;
+	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
+		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
+		break;
+	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
+		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
+		break;
+	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
+		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
+		break;
+	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
+	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
+	case 1:	/* Remove this in guest driver. */
+		break;
+	default:
+		gvt_err("Invalid PV notification %d\n", notification);
+	}
+	return ret;
+}
+
+static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+	char *env[3] = {NULL, NULL, NULL};
+	char vmid_str[20];
+	char display_ready_str[20];
+
+	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+	env[0] = display_ready_str;
+
+	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
+	env[1] = vmid_str;
+
+	return kobject_uevent_env(kobj, KOBJ_ADD, env);
+}
+
+static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 data;
+	int ret;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	switch (offset) {
+	case _vgtif_reg(display_ready):
+		send_display_ready_uevent(vgpu, data ? 1 : 0);
+		break;
+	case _vgtif_reg(g2v_notify):
+		ret = handle_g2v_notification(vgpu, data);
+		break;
+	/* add xhot and yhot to handled list to avoid error log */
+	case 0x78830:
+	case 0x78834:
+	case _vgtif_reg(pdp[0].lo):
+	case _vgtif_reg(pdp[0].hi):
+	case _vgtif_reg(pdp[1].lo):
+	case _vgtif_reg(pdp[1].hi):
+	case _vgtif_reg(pdp[2].lo):
+	case _vgtif_reg(pdp[2].hi):
+	case _vgtif_reg(pdp[3].lo):
+	case _vgtif_reg(pdp[3].hi):
+	case _vgtif_reg(execlist_context_descriptor_lo):
+	case _vgtif_reg(execlist_context_descriptor_hi):
+		break;
+	default:
+		gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+				offset, bytes, data);
+		break;
+	}
+	return 0;
+}
+
+static int pf_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 val = *(u32 *)p_data;
+
+	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
+	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
+	   offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
+		WARN_ONCE(true, "VM(%d): guest is trying to scale a plane\n",
+			  vgpu->id);
+		return 0;
+	}
+
+	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
+}
+
+static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
+		vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
+	else
+		vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
+	return 0;
+}
+
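+/* The FPGA_DBG RM_NOCLAIM bit is write-one-to-clear in the virtual register. */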
+static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
+		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
+	return 0;
+}
+
+static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 mode;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	mode = vgpu_vreg(vgpu, offset);
+
+	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA))
+		WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+				vgpu->id);
+
+	return 0;
+}
+
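+/*
+ * TRTT (tiled resource translation table) registers are not
+ * per-context, so valid writes are also applied to the physical
+ * registers.
+ */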
+static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u32 trtte = *(u32 *)p_data;
+
+	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
+		WARN(1, "VM(%d): Use physical address for TRTT!\n",
+				vgpu->id);
+		return -EINVAL;
+	}
+	write_vreg(vgpu, offset, p_data, bytes);
+	/* TRTTE is not per-context */
+	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+
+	return 0;
+}
+
+static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u32 val = *(u32 *)p_data;
+
+	if (val & 1) {
+		/* unblock hw logic */
+		I915_WRITE(_MMIO(offset), val);
+	}
+	write_vreg(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
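+/*
+ * Build the virtual DPLL status register from the enable bits (bit 31)
+ * of the four DPLL control registers before the default read.
+ */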
+static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 v = 0;
+
+	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
+		v |= (1 << 0);
+
+	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
+		v |= (1 << 8);
+
+	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
+		v |= (1 << 16);
+
+	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
+		v |= (1 << 24);
+
+	vgpu_vreg(vgpu, offset) = v;
+
+	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
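+/*
+ * Emulate the GEN6 PCODE mailbox: command 0x6 ("read memory latency")
+ * returns canned Skylake latency values, command 0x5 reports success
+ * in bit 0 of the data register, and the busy bit (31) is cleared to
+ * signal completion.
+ */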
+static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 value = *(u32 *)p_data;
+	u32 cmd = value & 0xff;
+	u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+
+	switch (cmd) {
+	case 0x6:
+		/*
+		 * "Read memory latency" command on gen9.
+		 * The memory latency values below were read
+		 * from a Skylake platform.
+		 */
+		if (!*data0)
+			*data0 = 0x1e1a1100;
+		else
+			*data0 = 0x61514b3d;
+		break;
+	case 0x5:
+		*data0 |= 0x1;
+		break;
+	}
+
+	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
+		     vgpu->id, value, *data0);
+
+	value &= ~(1 << 31);
+	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
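+/*
+ * Gen9 power well control: keep only the request bits and mirror each
+ * request into the adjacent state bit (v >> 1), so requests appear to
+ * complete immediately.
+ */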
+static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+
+	v &= (1 << 31) | (1 << 29) | (1 << 9) |
+	     (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+	v |= (v >> 1);
+
+	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
+}
+
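+/*
+ * Chicken registers carrying workarounds: force the virtual value and,
+ * on affected steppings, propagate the write to hardware.
+ */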
+static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	i915_reg_t reg = {.reg = offset};
+
+	switch (offset) {
+	case 0x4ddc:
+		vgpu_vreg(vgpu, offset) = 0x8000003c;
+		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
+		if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER))
+			I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+		break;
+	case 0x42080:
+		vgpu_vreg(vgpu, offset) = 0x8000;
+		/* WaCompressedResourceDisplayNewHashMode:skl */
+		if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER))
+			I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
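+/*
+ * SKL LCPLL control: only bits 31:30 are writable, and the lock bit
+ * (30) immediately tracks the enable bit (31).
+ */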
+static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+
+	/* other bits are MBZ. */
+	v &= (1 << 31) | (1 << 30);
+	v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
+
+	vgpu_vreg(vgpu, offset) = v;
+
+	return 0;
+}
+
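+/* Ring timestamp reads return the current host register value. */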
+static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
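+/*
+ * An execlist submission consists of four dword writes to ELSP; the
+ * workload is submitted to the vGPU scheduler once the fourth dword
+ * arrives.
+ */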
+static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+	struct intel_vgpu_execlist *execlist;
+	u32 data = *(u32 *)p_data;
+	int ret = 0;
+
+	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+		return -EINVAL;
+
+	execlist = &vgpu->execlist[ring_id];
+
+	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+	if (execlist->elsp_dwords.index == 3) {
+		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+		if (ret)
+			gvt_err("fail to submit workload on ring %d\n",
+					ring_id);
+	}
+
+	++execlist->elsp_dwords.index;
+	execlist->elsp_dwords.index &= 0x3;
+	return ret;
+}
+
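+/*
+ * A masked write toggling GFX_RUN_LIST_ENABLE switches the ring between
+ * execlist and ring-buffer mode; enabling execlists also starts vGPU
+ * scheduling.
+ */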
+static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 data = *(u32 *)p_data;
+	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+	bool enable_execlist;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
+			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
+
+		gvt_dbg_core("EXECLIST %s on ring %d\n",
+				(enable_execlist ? "enabling" : "disabling"),
+				ring_id);
+
+		if (enable_execlist)
+			intel_vgpu_start_schedule(vgpu);
+	}
+	return 0;
+}
+
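+/*
+ * Writes to the per-engine invalidation registers (0x4260-0x4270) mark
+ * a TLB flush as pending for the corresponding engine.
+ */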
+static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	unsigned int id = 0;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	switch (offset) {
+	case 0x4260:
+		id = RCS;
+		break;
+	case 0x4264:
+		id = VCS;
+		break;
+	case 0x4268:
+		id = VCS2;
+		break;
+	case 0x426c:
+		id = BCS;
+		break;
+	case 0x4270:
+		id = VECS;
+		break;
+	default:
+		/* do not flag a pending TLB flush for an unknown offset */
+		return -EINVAL;
+	}
+	set_bit(id, (void *)vgpu->tlb_handle_pending);
+
+	return 0;
+}
+
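+/*
+ * Reset control: a masked write requesting a reset is acknowledged by
+ * setting READY_TO_RESET in the virtual register; withdrawing the
+ * request clears it.
+ */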
+static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 data;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+		data |= RESET_CTL_READY_TO_RESET;
+	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+		data &= ~RESET_CTL_READY_TO_RESET;
+
+	vgpu_vreg(vgpu, offset) = data;
+	return 0;
+}
+
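+/*
+ * Helpers for registering MMIO handlers. MMIO_F(reg, size, flags,
+ * addr_mask, ro_mask, device, read, write) is the base form; MMIO_D
+ * registers a 4-byte default entry, MMIO_DH adds handlers, MMIO_DFH
+ * adds flags, and the MMIO_RING_* variants repeat the registration for
+ * the render, blitter, BSD and VEBOX ring bases.
+ */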
+#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
+	ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
+		f, s, am, rm, d, r, w); \
+	if (ret) \
+		return ret; \
+} while (0)
+
+#define MMIO_D(reg, d) \
+	MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_DH(reg, d, r, w) \
+	MMIO_F(reg, 4, 0, 0, 0, d, r, w)
+
+#define MMIO_DFH(reg, d, f, r, w) \
+	MMIO_F(reg, 4, f, 0, 0, d, r, w)
+
+#define MMIO_GM(reg, d, r, w) \
+	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
+
+#define MMIO_RO(reg, d, f, rm, r, w) \
+	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
+
+#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
+	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
+	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
+	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
+	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
+} while (0)
+
+#define MMIO_RING_D(prefix, d) \
+	MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_RING_DFH(prefix, d, f, r, w) \
+	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
+
+#define MMIO_RING_GM(prefix, d, r, w) \
+	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
+
+#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
+	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
+
+static int init_generic_mmio_info(struct intel_gvt *gvt)
+{
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ret;
+
+	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+
+	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
+	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
+	MMIO_D(SDEISR, D_ALL);
+
+	MMIO_RING_D(RING_HWSTAM, D_ALL);
+
+	MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+
+#define RING_REG(base) (base + 0x28)
+	MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x134)
+	MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+	MMIO_GM(0x2148, D_ALL, NULL, NULL);
+	MMIO_GM(CCID, D_ALL, NULL, NULL);
+	MMIO_GM(0x12198, D_ALL, NULL, NULL);
+	MMIO_D(GEN7_CXT_SIZE, D_ALL);
+
+	MMIO_RING_D(RING_TAIL, D_ALL);
+	MMIO_RING_D(RING_HEAD, D_ALL);
+	MMIO_RING_D(RING_CTL, D_ALL);
+	MMIO_RING_D(RING_ACTHD, D_ALL);
+	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+
+	/* RING MODE */
+#define RING_REG(base) (base + 0x29c)
+	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+#undef RING_REG
+
+	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
+			ring_timestamp_mmio_read, NULL);
+	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
+			ring_timestamp_mmio_read, NULL);
+
+	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK, NULL, NULL);
+
+	MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_D(GAM_ECOCHK, D_ALL);
+	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_D(0x9030, D_ALL);
+	MMIO_D(0x20a0, D_ALL);
+	MMIO_D(0x2420, D_ALL);
+	MMIO_D(0x2430, D_ALL);
+	MMIO_D(0x2434, D_ALL);
+	MMIO_D(0x2438, D_ALL);
+	MMIO_D(0x243c, D_ALL);
+	MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0xe184, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+
+	/* display */
+	MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_D(0x602a0, D_ALL);
+
+	MMIO_D(0x65050, D_ALL);
+	MMIO_D(0x650b4, D_ALL);
+
+	MMIO_D(0xc4040, D_ALL);
+	MMIO_D(DERRMR, D_ALL);
+
+	MMIO_D(PIPEDSL(PIPE_A), D_ALL);
+	MMIO_D(PIPEDSL(PIPE_B), D_ALL);
+	MMIO_D(PIPEDSL(PIPE_C), D_ALL);
+	MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
+
+	MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
+	MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
+	MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
+	MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+
+	MMIO_D(PIPESTAT(PIPE_A), D_ALL);
+	MMIO_D(PIPESTAT(PIPE_B), D_ALL);
+	MMIO_D(PIPESTAT(PIPE_C), D_ALL);
+	MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
+
+	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
+	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
+	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
+	MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
+	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
+	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
+	MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+	MMIO_D(CURCNTR(PIPE_A), D_ALL);
+	MMIO_D(CURCNTR(PIPE_B), D_ALL);
+	MMIO_D(CURCNTR(PIPE_C), D_ALL);
+
+	MMIO_D(CURPOS(PIPE_A), D_ALL);
+	MMIO_D(CURPOS(PIPE_B), D_ALL);
+	MMIO_D(CURPOS(PIPE_C), D_ALL);
+
+	MMIO_D(CURBASE(PIPE_A), D_ALL);
+	MMIO_D(CURBASE(PIPE_B), D_ALL);
+	MMIO_D(CURBASE(PIPE_C), D_ALL);
+
+	MMIO_D(0x700ac, D_ALL);
+	MMIO_D(0x710ac, D_ALL);
+	MMIO_D(0x720ac, D_ALL);
+
+	MMIO_D(0x70090, D_ALL);
+	MMIO_D(0x70094, D_ALL);
+	MMIO_D(0x70098, D_ALL);
+	MMIO_D(0x7009c, D_ALL);
+
+	MMIO_D(DSPCNTR(PIPE_A), D_ALL);
+	MMIO_D(DSPADDR(PIPE_A), D_ALL);
+	MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
+	MMIO_D(DSPPOS(PIPE_A), D_ALL);
+	MMIO_D(DSPSIZE(PIPE_A), D_ALL);
+	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
+	MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
+	MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+
+	MMIO_D(DSPCNTR(PIPE_B), D_ALL);
+	MMIO_D(DSPADDR(PIPE_B), D_ALL);
+	MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
+	MMIO_D(DSPPOS(PIPE_B), D_ALL);
+	MMIO_D(DSPSIZE(PIPE_B), D_ALL);
+	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
+	MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
+	MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+
+	MMIO_D(DSPCNTR(PIPE_C), D_ALL);
+	MMIO_D(DSPADDR(PIPE_C), D_ALL);
+	MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
+	MMIO_D(DSPPOS(PIPE_C), D_ALL);
+	MMIO_D(DSPSIZE(PIPE_C), D_ALL);
+	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
+	MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
+	MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+
+	MMIO_D(SPRCTL(PIPE_A), D_ALL);
+	MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
+	MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
+	MMIO_D(SPRPOS(PIPE_A), D_ALL);
+	MMIO_D(SPRSIZE(PIPE_A), D_ALL);
+	MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
+	MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
+	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
+	MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
+	MMIO_D(SPROFFSET(PIPE_A), D_ALL);
+	MMIO_D(SPRSCALE(PIPE_A), D_ALL);
+	MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+
+	MMIO_D(SPRCTL(PIPE_B), D_ALL);
+	MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
+	MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
+	MMIO_D(SPRPOS(PIPE_B), D_ALL);
+	MMIO_D(SPRSIZE(PIPE_B), D_ALL);
+	MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
+	MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
+	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
+	MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
+	MMIO_D(SPROFFSET(PIPE_B), D_ALL);
+	MMIO_D(SPRSCALE(PIPE_B), D_ALL);
+	MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+
+	MMIO_D(SPRCTL(PIPE_C), D_ALL);
+	MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
+	MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
+	MMIO_D(SPRPOS(PIPE_C), D_ALL);
+	MMIO_D(SPRSIZE(PIPE_C), D_ALL);
+	MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
+	MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
+	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
+	MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
+	MMIO_D(SPROFFSET(PIPE_C), D_ALL);
+	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
+	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+
+	MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
+	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
+	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
+	MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
+	MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
+	MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
+	MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
+	MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
+	MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
+
+	MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
+	MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
+	MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
+	MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
+	MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
+	MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
+	MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
+	MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
+	MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
+
+	MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
+	MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
+	MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
+	MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
+	MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
+	MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
+	MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
+	MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
+	MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
+
+	MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
+	MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
+	MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
+	MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
+	MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
+	MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
+	MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
+	MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
+
+	MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
+	MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
+	MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
+	MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
+	MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
+	MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
+	MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
+	MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
+
+	MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
+	MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
+	MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
+	MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
+	MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
+	MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
+	MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
+	MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
+
+	MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
+	MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
+	MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
+	MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
+	MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
+	MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
+	MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
+	MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
+
+	MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
+	MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
+	MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
+	MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
+	MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
+	MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
+	MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
+	MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
+
+	MMIO_D(PF_CTL(PIPE_A), D_ALL);
+	MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
+	MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
+	MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
+	MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
+
+	MMIO_D(PF_CTL(PIPE_B), D_ALL);
+	MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
+	MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
+	MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
+	MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
+
+	MMIO_D(PF_CTL(PIPE_C), D_ALL);
+	MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
+	MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
+	MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
+	MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
+
+	MMIO_D(WM0_PIPEA_ILK, D_ALL);
+	MMIO_D(WM0_PIPEB_ILK, D_ALL);
+	MMIO_D(WM0_PIPEC_IVB, D_ALL);
+	MMIO_D(WM1_LP_ILK, D_ALL);
+	MMIO_D(WM2_LP_ILK, D_ALL);
+	MMIO_D(WM3_LP_ILK, D_ALL);
+	MMIO_D(WM1S_LP_ILK, D_ALL);
+	MMIO_D(WM2S_LP_IVB, D_ALL);
+	MMIO_D(WM3S_LP_IVB, D_ALL);
+
+	MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
+	MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
+	MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
+	MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
+
+	MMIO_D(0x48268, D_ALL);
+
+	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
+		gmbus_mmio_write);
+	MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+		dp_aux_ch_ctl_mmio_write);
+	MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+		dp_aux_ch_ctl_mmio_write);
+	MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+		dp_aux_ch_ctl_mmio_write);
+
+	MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+
+	MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
+	MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
+
+	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
+	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
+	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
+	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+
+	MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
+	MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
+	MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
+	MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
+	MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
+	MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
+	MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
+
+	MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
+	MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
+	MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
+	MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
+	MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
+	MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
+	MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
+
+	MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
+	MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
+	MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
+	MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
+	MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
+	MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
+	MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
+	MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
+
+	MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
+	MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
+	MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
+
+	MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
+	MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
+	MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
+
+	MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
+	MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
+	MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
+
+	MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
+	MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
+	MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
+
+	MMIO_D(_FDI_RXA_MISC, D_ALL);
+	MMIO_D(_FDI_RXB_MISC, D_ALL);
+	MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
+	MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
+	MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
+	MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
+
+	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
+	MMIO_D(PCH_PP_DIVISOR, D_ALL);
+	MMIO_D(PCH_PP_STATUS,  D_ALL);
+	MMIO_D(PCH_LVDS, D_ALL);
+	MMIO_D(_PCH_DPLL_A, D_ALL);
+	MMIO_D(_PCH_DPLL_B, D_ALL);
+	MMIO_D(_PCH_FPA0, D_ALL);
+	MMIO_D(_PCH_FPA1, D_ALL);
+	MMIO_D(_PCH_FPB0, D_ALL);
+	MMIO_D(_PCH_FPB1, D_ALL);
+	MMIO_D(PCH_DREF_CONTROL, D_ALL);
+	MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
+	MMIO_D(PCH_DPLL_SEL, D_ALL);
+
+	MMIO_D(0x61208, D_ALL);
+	MMIO_D(0x6120c, D_ALL);
+	MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
+	MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
+
+	MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
+	MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
+	MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
+	MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
+	MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
+	MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+
+	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
+		PORTA_HOTPLUG_STATUS_MASK
+		| PORTB_HOTPLUG_STATUS_MASK
+		| PORTC_HOTPLUG_STATUS_MASK
+		| PORTD_HOTPLUG_STATUS_MASK,
+		NULL, NULL);
+
+	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
+	MMIO_D(FUSE_STRAP, D_ALL);
+	MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
+
+	MMIO_D(DISP_ARB_CTL, D_ALL);
+	MMIO_D(DISP_ARB_CTL2, D_ALL);
+
+	MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
+	MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
+	MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
+
+	MMIO_D(SOUTH_CHICKEN1, D_ALL);
+	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
+	MMIO_D(_TRANSA_CHICKEN1, D_ALL);
+	MMIO_D(_TRANSB_CHICKEN1, D_ALL);
+	MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
+	MMIO_D(_TRANSA_CHICKEN2, D_ALL);
+	MMIO_D(_TRANSB_CHICKEN2, D_ALL);
+
+	MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
+	MMIO_D(ILK_DPFC_CONTROL, D_ALL);
+	MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
+	MMIO_D(ILK_DPFC_STATUS, D_ALL);
+	MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
+	MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
+	MMIO_D(ILK_FBC_RT_BASE, D_ALL);
+
+	MMIO_D(IPS_CTL, D_ALL);
+
+	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
+
+	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
+
+	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
+	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
+
+	MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
+	MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
+	MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
+	MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
+	MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
+	MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
+	MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_D(0x60110, D_ALL);
+	MMIO_D(0x61110, D_ALL);
+	MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+	MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+	MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+	MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+	MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+	MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+
+	MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
+	MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
+	MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+	MMIO_D(SPLL_CTL, D_ALL);
+	MMIO_D(_WRPLL_CTL1, D_ALL);
+	MMIO_D(_WRPLL_CTL2, D_ALL);
+	MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
+	MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
+	MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
+	MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
+	MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
+	MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
+	MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
+	MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
+
+	MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
+	MMIO_D(0x46508, D_ALL);
+
+	MMIO_D(0x49080, D_ALL);
+	MMIO_D(0x49180, D_ALL);
+	MMIO_D(0x49280, D_ALL);
+
+	MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
+	MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
+	MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
+
+	MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
+	MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
+	MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
+
+	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
+	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
+	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
+
+	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
+	MMIO_D(SBI_ADDR, D_ALL);
+	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
+	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
+	MMIO_D(PIXCLK_GATE, D_ALL);
+
+	MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
+		dp_aux_ch_ctl_mmio_write);
+
+	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+
+	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
+	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
+	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
+	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
+	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
+
+	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
+	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
+	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
+	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
+	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
+
+	MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x64eC0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
+	MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+
+	MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
+	MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
+	MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
+	MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
+
+	MMIO_D(_TRANSA_MSA_MISC, D_ALL);
+	MMIO_D(_TRANSB_MSA_MISC, D_ALL);
+	MMIO_D(_TRANSC_MSA_MISC, D_ALL);
+	MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
+
+	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
+	MMIO_D(FORCEWAKE_ACK, D_ALL);
+	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
+	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
+	MMIO_D(GTFIFODBG, D_ALL);
+	MMIO_D(GTFIFOCTL, D_ALL);
+	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
+	MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
+	MMIO_D(ECOBUS, D_ALL);
+	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
+	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
+	MMIO_D(GEN6_RPNSWREQ, D_ALL);
+	MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
+	MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
+	MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
+	MMIO_D(GEN6_RPSTAT1, D_ALL);
+	MMIO_D(GEN6_RP_CONTROL, D_ALL);
+	MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
+	MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
+	MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
+	MMIO_D(GEN6_RP_CUR_UP, D_ALL);
+	MMIO_D(GEN6_RP_PREV_UP, D_ALL);
+	MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
+	MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
+	MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
+	MMIO_D(GEN6_RP_UP_EI, D_ALL);
+	MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
+	MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
+	MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
+	MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
+	MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
+	MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
+	MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
+	MMIO_D(GEN6_RC_SLEEP, D_ALL);
+	MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
+	MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
+	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
+	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
+	MMIO_D(GEN6_PMINTRMSK, D_ALL);
+	MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+
+	MMIO_D(RSTDBYCTL, D_ALL);
+
+	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
+	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
+	MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
+	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
+
+	MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_D(TILECTL, D_ALL);
+
+	MMIO_D(GEN6_UCGCTL1, D_ALL);
+	MMIO_D(GEN6_UCGCTL2, D_ALL);
+
+	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+	MMIO_D(GEN6_PCODE_DATA, D_ALL);
+	MMIO_D(0x13812c, D_ALL);
+	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
+	MMIO_D(HSW_EDRAM_CAP, D_ALL);
+	MMIO_D(HSW_IDICR, D_ALL);
+	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
+
+	MMIO_D(0x3c, D_ALL);
+	MMIO_D(0x860, D_ALL);
+	MMIO_D(ECOSKPD, D_ALL);
+	MMIO_D(0x121d0, D_ALL);
+	MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
+	MMIO_D(0x41d0, D_ALL);
+	MMIO_D(GAC_ECO_BITS, D_ALL);
+	MMIO_D(0x6200, D_ALL);
+	MMIO_D(0x6204, D_ALL);
+	MMIO_D(0x6208, D_ALL);
+	MMIO_D(0x7118, D_ALL);
+	MMIO_D(0x7180, D_ALL);
+	MMIO_D(0x7408, D_ALL);
+	MMIO_D(0x7c00, D_ALL);
+	MMIO_D(GEN6_MBCTL, D_ALL);
+	MMIO_D(0x911c, D_ALL);
+	MMIO_D(0x9120, D_ALL);
+
+	MMIO_D(GAB_CTL, D_ALL);
+	MMIO_D(0x48800, D_ALL);
+	MMIO_D(0xce044, D_ALL);
+	MMIO_D(0xe6500, D_ALL);
+	MMIO_D(0xe6504, D_ALL);
+	MMIO_D(0xe6600, D_ALL);
+	MMIO_D(0xe6604, D_ALL);
+	MMIO_D(0xe6700, D_ALL);
+	MMIO_D(0xe6704, D_ALL);
+	MMIO_D(0xe6800, D_ALL);
+	MMIO_D(0xe6804, D_ALL);
+	MMIO_D(PCH_GMBUS4, D_ALL);
+	MMIO_D(PCH_GMBUS5, D_ALL);
+
+	MMIO_D(0x902c, D_ALL);
+	MMIO_D(0xec008, D_ALL);
+	MMIO_D(0xec00c, D_ALL);
+	MMIO_D(0xec008 + 0x18, D_ALL);
+	MMIO_D(0xec00c + 0x18, D_ALL);
+	MMIO_D(0xec008 + 0x18 * 2, D_ALL);
+	MMIO_D(0xec00c + 0x18 * 2, D_ALL);
+	MMIO_D(0xec008 + 0x18 * 3, D_ALL);
+	MMIO_D(0xec00c + 0x18 * 3, D_ALL);
+	MMIO_D(0xec408, D_ALL);
+	MMIO_D(0xec40c, D_ALL);
+	MMIO_D(0xec408 + 0x18, D_ALL);
+	MMIO_D(0xec40c + 0x18, D_ALL);
+	MMIO_D(0xec408 + 0x18 * 2, D_ALL);
+	MMIO_D(0xec40c + 0x18 * 2, D_ALL);
+	MMIO_D(0xec408 + 0x18 * 3, D_ALL);
+	MMIO_D(0xec40c + 0x18 * 3, D_ALL);
+	MMIO_D(0xfc810, D_ALL);
+	MMIO_D(0xfc81c, D_ALL);
+	MMIO_D(0xfc828, D_ALL);
+	MMIO_D(0xfc834, D_ALL);
+	MMIO_D(0xfcc00, D_ALL);
+	MMIO_D(0xfcc0c, D_ALL);
+	MMIO_D(0xfcc18, D_ALL);
+	MMIO_D(0xfcc24, D_ALL);
+	MMIO_D(0xfd000, D_ALL);
+	MMIO_D(0xfd00c, D_ALL);
+	MMIO_D(0xfd018, D_ALL);
+	MMIO_D(0xfd024, D_ALL);
+	MMIO_D(0xfd034, D_ALL);
+
+	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
+	MMIO_D(0x2054, D_ALL);
+	MMIO_D(0x12054, D_ALL);
+	MMIO_D(0x22054, D_ALL);
+	MMIO_D(0x1a054, D_ALL);
+
+	MMIO_D(0x44070, D_ALL);
+
+	MMIO_D(0x215c, D_HSW_PLUS);
+	MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
+	MMIO_D(OACONTROL, D_HSW);
+	MMIO_D(0x2b00, D_BDW_PLUS);
+	MMIO_D(0x2360, D_BDW_PLUS);
+	MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+
+	MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_D(BCS_SWCTRL, D_ALL);
+
+	MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+	MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+	MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+	MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+	MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+	MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+	return 0;
+}
+
+static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+{
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ret;
+
+	MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+			intel_vgpu_reg_imr_handler);
+
+	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
+
+	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
+
+	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
+
+	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
+
+	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
+
+	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
+
+	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
+		intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
+
+	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
+
+	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
+
+	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+	MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
+
+	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
+		intel_vgpu_reg_master_irq_handler);
+
+	MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+	MMIO_D(0x1c134, D_BDW_PLUS);
+
+	MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+	MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE),  D_BDW_PLUS);
+	MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+	MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+	MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+	MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+	MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
+	MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+			NULL, NULL);
+	MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+			NULL, NULL);
+	MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+			ring_timestamp_mmio_read, NULL);
+
+	MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0xd0)
+	MMIO_RING_F(RING_REG, 4, F_RO, 0,
+		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+		ring_reset_ctl_write);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
+		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+		ring_reset_ctl_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x230)
+	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
+	MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x234)
+	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x244)
+	MMIO_RING_D(RING_REG, D_BDW_PLUS);
+	MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x370)
+	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
+			NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x3a0)
+	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+#undef RING_REG
+
+	MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
+	MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
+	MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
+	MMIO_D(0x1c1d0, D_BDW_PLUS);
+	MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
+	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
+	MMIO_D(0x1c054, D_BDW_PLUS);
+
+	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
+
+	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0x270)
+	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+	MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+
+	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+
+	MMIO_D(WM_MISC, D_BDW);
+	MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
+
+	MMIO_D(0x66c00, D_BDW_PLUS);
+	MMIO_D(0x66c04, D_BDW_PLUS);
+
+	MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
+
+	MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
+	MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
+	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
+
+	MMIO_D(0xfdc, D_BDW);
+	MMIO_D(GEN8_ROW_CHICKEN, D_BDW_PLUS);
+	MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
+	MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+
+	MMIO_D(0xb1f0, D_BDW);
+	MMIO_D(0xb1c0, D_BDW);
+	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_D(0xb100, D_BDW);
+	MMIO_D(0xb10c, D_BDW);
+	MMIO_D(0xb110, D_BDW);
+
+	MMIO_DH(0x24d0, D_BDW_PLUS, NULL, NULL);
+	MMIO_DH(0x24d4, D_BDW_PLUS, NULL, NULL);
+	MMIO_DH(0x24d8, D_BDW_PLUS, NULL, NULL);
+	MMIO_DH(0x24dc, D_BDW_PLUS, NULL, NULL);
+
+	MMIO_D(0x83a4, D_BDW);
+	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
+
+	MMIO_D(0x8430, D_BDW);
+
+	MMIO_D(0x110000, D_BDW_PLUS);
+
+	MMIO_D(0x48400, D_BDW_PLUS);
+
+	MMIO_D(0x6e570, D_BDW_PLUS);
+	MMIO_D(0x65f10, D_BDW_PLUS);
+
+	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0xe180, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+
+	MMIO_D(0x2248, D_BDW);
+
+	return 0;
+}
+
+static int init_skl_mmio_info(struct intel_gvt *gvt)
+{
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ret;
+
+	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+	MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
+
+	MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+	MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+	MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+
+	MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
+	MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+
+	MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
+	MMIO_D(0xa210, D_SKL_PLUS);
+	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+	MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
+	MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
+	MMIO_D(0x45504, D_SKL);
+	MMIO_D(0x45520, D_SKL);
+	MMIO_D(0x46000, D_SKL);
+	MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
+	MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
+	MMIO_D(0x6C040, D_SKL);
+	MMIO_D(0x6C048, D_SKL);
+	MMIO_D(0x6C050, D_SKL);
+	MMIO_D(0x6C044, D_SKL);
+	MMIO_D(0x6C04C, D_SKL);
+	MMIO_D(0x6C054, D_SKL);
+	MMIO_D(0x6c058, D_SKL);
+	MMIO_D(0x6c05c, D_SKL);
+	MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
+
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
+	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
+	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
+
+	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
+
+	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
+
+	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
+
+	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
+	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
+	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
+
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+	MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
+
+	MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
+
+	MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
+
+	MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
+
+	MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
+
+	MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
+
+	MMIO_D(0x70380, D_SKL);
+	MMIO_D(0x71380, D_SKL);
+	MMIO_D(0x72380, D_SKL);
+	MMIO_D(0x7039c, D_SKL);
+
+	MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_D(0x8f074, D_SKL);
+	MMIO_D(0x8f004, D_SKL);
+	MMIO_D(0x8f034, D_SKL);
+
+	MMIO_D(0xb11c, D_SKL);
+
+	MMIO_D(0x51000, D_SKL);
+	MMIO_D(0x6c00c, D_SKL);
+
+	MMIO_F(0xc800, 0x7f8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(0xb020, 0x80, 0, 0, 0, D_SKL, NULL, NULL);
+
+	MMIO_D(0xd08, D_SKL);
+	MMIO_D(0x20e0, D_SKL);
+	MMIO_D(0x20ec, D_SKL);
+
+	/* TRTT */
+	MMIO_D(0x4de0, D_SKL);
+	MMIO_D(0x4de4, D_SKL);
+	MMIO_D(0x4de8, D_SKL);
+	MMIO_D(0x4dec, D_SKL);
+	MMIO_D(0x4df0, D_SKL);
+	MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+	MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+
+	MMIO_D(0x45008, D_SKL);
+
+	MMIO_D(0x46430, D_SKL);
+
+	MMIO_D(0x46520, D_SKL);
+
+	MMIO_D(0xc403c, D_SKL);
+	MMIO_D(0xb004, D_SKL);
+	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
+
+	MMIO_D(0x65900, D_SKL);
+	MMIO_D(0x1082c0, D_SKL);
+	MMIO_D(0x4068, D_SKL);
+	MMIO_D(0x67054, D_SKL);
+	MMIO_D(0x6e560, D_SKL);
+	MMIO_D(0x6e554, D_SKL);
+	MMIO_D(0x2b20, D_SKL);
+	MMIO_D(0x65f00, D_SKL);
+	MMIO_D(0x65f08, D_SKL);
+	MMIO_D(0x320f0, D_SKL);
+
+	MMIO_D(_REG_VCS2_EXCC, D_SKL);
+	MMIO_D(0x70034, D_SKL);
+	MMIO_D(0x71034, D_SKL);
+	MMIO_D(0x72034, D_SKL);
+
+	MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
+	MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
+	MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
+	MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
+	MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
+	MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
+
+	MMIO_D(0x44500, D_SKL);
+	return 0;
+}
+
+/**
+ * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
+ * @gvt: GVT device
+ * @offset: register offset
+ *
+ * This function is used to find the MMIO information entry in the hash table
+ *
+ * Returns:
+ * pointer to the MMIO information entry, NULL if it does not exist
+ */
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+	unsigned int offset)
+{
+	struct intel_gvt_mmio_info *e;
+
+	WARN_ON(!IS_ALIGNED(offset, 4));
+
+	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
+		if (e->offset == offset)
+			return e;
+	}
+	return NULL;
+}
+
+/**
+ * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the MMIO
+ * information table of the GVT device
+ *
+ */
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
+{
+	struct hlist_node *tmp;
+	struct intel_gvt_mmio_info *e;
+	int i;
+
+	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
+		kfree(e);
+
+	vfree(gvt->mmio.mmio_attribute);
+	gvt->mmio.mmio_attribute = NULL;
+}
+
+/**
+ * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage to set up the MMIO
+ * information table of the GVT device
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
+{
+	struct intel_gvt_device_info *info = &gvt->device_info;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ret;
+
+	gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
+	if (!gvt->mmio.mmio_attribute)
+		return -ENOMEM;
+
+	ret = init_generic_mmio_info(gvt);
+	if (ret)
+		goto err;
+
+	if (IS_BROADWELL(dev_priv)) {
+		ret = init_broadwell_mmio_info(gvt);
+		if (ret)
+			goto err;
+	} else if (IS_SKYLAKE(dev_priv)) {
+		ret = init_broadwell_mmio_info(gvt);
+		if (ret)
+			goto err;
+		ret = init_skl_mmio_info(gvt);
+		if (ret)
+			goto err;
+	}
+	return 0;
+err:
+	intel_gvt_clean_mmio_info(gvt);
+	return ret;
+}
+
+/**
+ * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |=
+		F_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by commands
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+		unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] &
+		F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
+		unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] &
+		F_UNALIGN;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by commands
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+		unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |=
+		F_CMD_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO register has a mode mask in its upper 16 bits, false otherwise.
+ *
+ */
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] &
+		F_MODE_MASK;
+}
+
+/**
+ * intel_vgpu_default_mmio_read - default MMIO read handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	read_vreg(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
+/**
+ * intel_vgpu_default_mmio_write - default MMIO write handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 254df8b..027ef55 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -19,17 +19,51 @@
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
+ *
+ * Authors:
+ *    Eddie Dong <eddie.dong@intel.com>
+ *    Dexuan Cui
+ *    Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
  */
 
 #ifndef _GVT_HYPERCALL_H_
 #define _GVT_HYPERCALL_H_
 
+struct intel_gvt_io_emulation_ops {
+	int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
+	int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
+	int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
+	int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
+};
+
+extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
+
 /*
  * Specific GVT-g MPT modules function collections. Currently GVT-g supports
  * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
  */
 struct intel_gvt_mpt {
 	int (*detect_host)(void);
+	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
+	void (*detach_vgpu)(unsigned long handle);
+	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
+	unsigned long (*from_virt_to_mfn)(void *p);
+	int (*set_wp_page)(unsigned long handle, u64 gfn);
+	int (*unset_wp_page)(unsigned long handle, u64 gfn);
+	int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+			unsigned long len);
+	int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+			 unsigned long len);
+	unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
+			      unsigned long mfn, unsigned int nr, bool map,
+			      int type);
+	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
+			     bool map);
 };
 
 extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
new file mode 100644
index 0000000..f7be02a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ *    Min he <min.he@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/* common offset among interrupt control registers */
+#define regbase_to_isr(base)	(base)
+#define regbase_to_imr(base)	(base + 0x4)
+#define regbase_to_iir(base)	(base + 0x8)
+#define regbase_to_ier(base)	(base + 0xC)
+
+#define iir_to_regbase(iir)    (iir - 0x8)
+#define ier_to_regbase(ier)    (ier - 0xC)
+
+#define get_event_virt_handler(irq, e)	(irq->events[e].v_handler)
+#define get_irq_info(irq, e)		(irq->events[e].info)
+
+#define irq_to_gvt(irq) \
+	container_of(irq, struct intel_gvt, irq)
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+		struct intel_gvt_irq_info *info);
+
+static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
+	[RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
+	[RCS_DEBUG] = "Render EU debug from SVG",
+	[RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
+	[RCS_CMD_STREAMER_ERR] = "Render CS error interrupt",
+	[RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify",
+	[RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded",
+	[RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults",
+	[RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt",
+
+	[VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT",
+	[VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status",
+	[VCS_CMD_STREAMER_ERR] = "Video CS error interrupt",
+	[VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify",
+	[VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded",
+	[VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults",
+	[VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt",
+	[VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT",
+	[VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify",
+	[VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt",
+
+	[BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT",
+	[BCS_MMIO_SYNC_FLUSH] = "Blitter MMIO sync flush status",
+	[BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt",
+	[BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify",
+	[BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults",
+	[BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt",
+
+	[VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify",
+	[VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt",
+
+	[PIPE_A_FIFO_UNDERRUN] = "Pipe A FIFO underrun",
+	[PIPE_A_CRC_ERR] = "Pipe A CRC error",
+	[PIPE_A_CRC_DONE] = "Pipe A CRC done",
+	[PIPE_A_VSYNC] = "Pipe A vsync",
+	[PIPE_A_LINE_COMPARE] = "Pipe A line compare",
+	[PIPE_A_ODD_FIELD] = "Pipe A odd field",
+	[PIPE_A_EVEN_FIELD] = "Pipe A even field",
+	[PIPE_A_VBLANK] = "Pipe A vblank",
+	[PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun",
+	[PIPE_B_CRC_ERR] = "Pipe B CRC error",
+	[PIPE_B_CRC_DONE] = "Pipe B CRC done",
+	[PIPE_B_VSYNC] = "Pipe B vsync",
+	[PIPE_B_LINE_COMPARE] = "Pipe B line compare",
+	[PIPE_B_ODD_FIELD] = "Pipe B odd field",
+	[PIPE_B_EVEN_FIELD] = "Pipe B even field",
+	[PIPE_B_VBLANK] = "Pipe B vblank",
+	[PIPE_C_VBLANK] = "Pipe C vblank",
+	[DPST_PHASE_IN] = "DPST phase in event",
+	[DPST_HISTOGRAM] = "DPST histogram event",
+	[GSE] = "GSE",
+	[DP_A_HOTPLUG] = "DP A Hotplug",
+	[AUX_CHANNEL_A] = "AUX Channel A",
+	[PERF_COUNTER] = "Performance counter",
+	[POISON] = "Poison",
+	[GTT_FAULT] = "GTT fault",
+	[PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done",
+	[PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done",
+	[PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done",
+	[SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done",
+	[SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done",
+	[SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done",
+
+	[PCU_THERMAL] = "PCU Thermal Event",
+	[PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event",
+
+	[FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A",
+	[AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A",
+	[AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A",
+	[FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B",
+	[AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B",
+	[AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B",
+	[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
+	[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
+	[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
+	[ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
+	[GMBUS] = "Gmbus",
+	[SDVO_B_HOTPLUG] = "SDVO B hotplug",
+	[CRT_HOTPLUG] = "CRT Hotplug",
+	[DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug",
+	[DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug",
+	[DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug",
+	[AUX_CHANNEL_B] = "AUX Channel B",
+	[AUX_CHANNEL_C] = "AUX Channel C",
+	[AUX_CHANNEL_D] = "AUX Channel D",
+	[AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B",
+	[AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C",
+	[AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D",
+
+	[INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!",
+};
+
+static inline struct intel_gvt_irq_info *regbase_to_irq_info(
+		struct intel_gvt *gvt,
+		unsigned int reg)
+{
+	struct intel_gvt_irq *irq = &gvt->irq;
+	int i;
+
+	for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+		if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg)
+			return irq->info[i];
+	}
+
+	return NULL;
+}
+
+/**
+ * intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IMR register bit change
+ * behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+	unsigned int reg, void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+	u32 changed, masked, unmasked;
+	u32 imr = *(u32 *)p_data;
+
+	gvt_dbg_irq("write IMR %x with val %x\n",
+		reg, imr);
+
+	gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
+
+	/* figure out newly masked/unmasked bits */
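+	/*
+	 * "changed" holds every bit toggled by this write; of those,
+	 * "masked" keeps the bits that end up set (newly masked) and
+	 * "unmasked" the bits that end up clear (newly unmasked).
+	 */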
+	changed = vgpu_vreg(vgpu, reg) ^ imr;
+	masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+	unmasked = masked ^ changed;
+
+	gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
+		changed, masked, unmasked);
+
+	vgpu_vreg(vgpu, reg) = imr;
+
+	ops->check_pending_irq(vgpu);
+	gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+	return 0;
+}
+
+/**
+ * intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the master IRQ register on gen8+.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+	unsigned int reg, void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+	u32 changed, enabled, disabled;
+	u32 ier = *(u32 *)p_data;
+	u32 virtual_ier = vgpu_vreg(vgpu, reg);
+
+	gvt_dbg_irq("write master irq reg %x with val %x\n",
+		reg, ier);
+
+	gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+
+	/*
+	 * GEN8_MASTER_IRQ is a special irq register,
+	 * only bit 31 is allowed to be modified
+	 * and treated as an IER bit.
+	 */
+	ier &= GEN8_MASTER_IRQ_CONTROL;
+	virtual_ier &= GEN8_MASTER_IRQ_CONTROL;
+	vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
+	vgpu_vreg(vgpu, reg) |= ier;
+
+	/* figure out newly enabled/disabled bits */
+	changed = virtual_ier ^ ier;
+	enabled = (virtual_ier & changed) ^ changed;
+	disabled = enabled ^ changed;
+
+	gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+			changed, enabled, disabled);
+
+	ops->check_pending_irq(vgpu);
+	gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+	return 0;
+}
+
+/**
+ * intel_vgpu_reg_ier_handler - Generic IER write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IER register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+	unsigned int reg, void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+	struct intel_gvt_irq_info *info;
+	u32 changed, enabled, disabled;
+	u32 ier = *(u32 *)p_data;
+
+	gvt_dbg_irq("write IER %x with val %x\n",
+		reg, ier);
+
+	gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+
+	/* figure out newly enabled/disabled bits */
+	changed = vgpu_vreg(vgpu, reg) ^ ier;
+	enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+	disabled = enabled ^ changed;
+
+	gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+			changed, enabled, disabled);
+	vgpu_vreg(vgpu, reg) = ier;
+
+	info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
+	if (WARN_ON(!info))
+		return -EINVAL;
+
+	if (info->has_upstream_irq)
+		update_upstream_irq(vgpu, info);
+
+	ops->check_pending_irq(vgpu);
+	gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+	return 0;
+}
+
+/**
+ * intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IIR register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+	void *p_data, unsigned int bytes)
+{
+	struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
+		iir_to_regbase(reg));
+	u32 iir = *(u32 *)p_data;
+
+	gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+
+	if (WARN_ON(!info))
+		return -EINVAL;
+
+	vgpu_vreg(vgpu, reg) &= ~iir;
+
+	if (info->has_upstream_irq)
+		update_upstream_irq(vgpu, info);
+	return 0;
+}
+
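+/*
+ * Routing table from GEN8 master IRQ register bits down to the
+ * second-level interrupt register groups; down_irq_bitmask selects
+ * which bits of the downstream group's IIR feed the given master bit.
+ */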
+static struct intel_gvt_irq_map gen8_irq_map[] = {
+	{ INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 },
+	{ INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 },
+	{ -1, -1, ~0 },
+};
+
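+/*
+ * Propagate pending state from a downstream interrupt group into its
+ * upstream group's IIR (or the master ISR), recursing until the top
+ * level is reached.
+ */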
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+		struct intel_gvt_irq_info *info)
+{
+	struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+	struct intel_gvt_irq_map *map = irq->irq_map;
+	struct intel_gvt_irq_info *up_irq_info = NULL;
+	u32 set_bits = 0;
+	u32 clear_bits = 0;
+	int bit;
+	u32 val = vgpu_vreg(vgpu,
+			regbase_to_iir(i915_mmio_reg_offset(info->reg_base)))
+		& vgpu_vreg(vgpu,
+			regbase_to_ier(i915_mmio_reg_offset(info->reg_base)));
+
+	if (!info->has_upstream_irq)
+		return;
+
+	for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+		if (info->group != map->down_irq_group)
+			continue;
+
+		if (!up_irq_info)
+			up_irq_info = irq->info[map->up_irq_group];
+		else
+			WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+
+		bit = map->up_irq_bit;
+
+		if (val & map->down_irq_bitmask)
+			set_bits |= (1 << bit);
+		else
+			clear_bits |= (1 << bit);
+	}
+
+	WARN_ON(!up_irq_info);
+
+	if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
+		u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
+
+		vgpu_vreg(vgpu, isr) &= ~clear_bits;
+		vgpu_vreg(vgpu, isr) |= set_bits;
+	} else {
+		u32 iir = regbase_to_iir(
+			i915_mmio_reg_offset(up_irq_info->reg_base));
+		u32 imr = regbase_to_imr(
+			i915_mmio_reg_offset(up_irq_info->reg_base));
+
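+		/*
+		 * Only newly set bits are ORed into the upstream IIR, and
+		 * only if unmasked in the IMR; pending bits are cleared by
+		 * the guest writing 1s to the IIR, not from here.
+		 */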
+		vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr));
+	}
+
+	if (up_irq_info->has_upstream_irq)
+		update_upstream_irq(vgpu, up_irq_info);
+}
+
+static void init_irq_map(struct intel_gvt_irq *irq)
+{
+	struct intel_gvt_irq_map *map;
+	struct intel_gvt_irq_info *up_info, *down_info;
+	int up_bit;
+
+	for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+		up_info = irq->info[map->up_irq_group];
+		up_bit = map->up_irq_bit;
+		down_info = irq->info[map->down_irq_group];
+
+		set_bit(up_bit, up_info->downstream_irq_bitmap);
+		down_info->has_upstream_irq = true;
+
+		gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n",
+			up_info->group, up_bit,
+			down_info->group, map->down_irq_bitmask);
+	}
+}
+
+/* =======================vEvent injection===================== */
+static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+{
+	return intel_gvt_hypervisor_inject_msi(vgpu);
+}
+
+static void propagate_event(struct intel_gvt_irq *irq,
+	enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+	struct intel_gvt_irq_info *info;
+	unsigned int reg_base;
+	int bit;
+
+	info = get_irq_info(irq, event);
+	if (WARN_ON(!info))
+		return;
+
+	reg_base = i915_mmio_reg_offset(info->reg_base);
+	bit = irq->events[event].bit;
+
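+	/* latch the event into the IIR only when it is unmasked in the IMR */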
+	if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
+					regbase_to_imr(reg_base)))) {
+		gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
+				bit, irq_name[event], vgpu->id);
+		set_bit(bit, (void *)&vgpu_vreg(vgpu,
+					regbase_to_iir(reg_base)));
+	}
+}
+
+/* =======================vEvent Handlers===================== */
+static void handle_default_event_virt(struct intel_gvt_irq *irq,
+	enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+	if (!vgpu->irq.irq_warn_once[event]) {
+		gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n",
+			vgpu->id, event, irq_name[event]);
+		vgpu->irq.irq_warn_once[event] = true;
+	}
+	propagate_event(irq, event, vgpu);
+}
+
+/* =====================GEN specific logic======================= */
+/* GEN8 interrupt routines. */
+
+#define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \
+static struct intel_gvt_irq_info gen8_##regname##_info = { \
+	.name = #regname"-IRQ", \
+	.reg_base = (regbase), \
+	.bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
+		INTEL_GVT_EVENT_RESERVED}, \
+}
+
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ);
+
+static struct intel_gvt_irq_info gvt_base_pch_info = {
+	.name = "PCH-IRQ",
+	.reg_base = SDEISR,
+	.bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
+		INTEL_GVT_EVENT_RESERVED},
+};
+
+static void gen8_check_pending_irq(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+	int i;
+
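+	/* bail out while the guest keeps the master interrupt control disabled */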
+	if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) &
+				GEN8_MASTER_IRQ_CONTROL))
+		return;
+
+	for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+		struct intel_gvt_irq_info *info = irq->info[i];
+		u32 reg_base;
+
+		if (!info->has_upstream_irq)
+			continue;
+
+		reg_base = i915_mmio_reg_offset(info->reg_base);
+		if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base))
+				& vgpu_vreg(vgpu, regbase_to_ier(reg_base))))
+			update_upstream_irq(vgpu, info);
+	}
+
+	if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ))
+			& ~GEN8_MASTER_IRQ_CONTROL)
+		inject_virtual_interrupt(vgpu);
+}
+
+static void gen8_init_irq(
+		struct intel_gvt_irq *irq)
+{
+	struct intel_gvt *gvt = irq_to_gvt(irq);
+
+#define SET_BIT_INFO(s, b, e, i)		\
+	do {					\
+		s->events[e].bit = b;		\
+		s->events[e].info = s->info[i];	\
+		s->info[i]->bit_to_event[b] = e;\
+	} while (0)
+
+#define SET_IRQ_GROUP(s, g, i) \
+	do { \
+		s->info[g] = i; \
+		(i)->group = g; \
+		set_bit(g, s->irq_info_bitmap); \
+	} while (0)
+
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info);
+	SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info);
+
+	/* GEN8 level 2 interrupts. */
+
+	/* GEN8 interrupt GT0 events */
+	SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+	SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0);
+	SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+	SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+	SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0);
+	SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+	/* GEN8 interrupt GT1 events */
+	SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1);
+	SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
+	SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
+
+	if (HAS_BSD2(gvt->dev_priv)) {
+		SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
+			INTEL_GVT_IRQ_INFO_GT1);
+		SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
+			INTEL_GVT_IRQ_INFO_GT1);
+		SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH,
+			INTEL_GVT_IRQ_INFO_GT1);
+	}
+
+	/* GEN8 interrupt GT3 events */
+	SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3);
+	SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3);
+	SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3);
+
+	SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+	SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+	SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+	/* GEN8 interrupt DE PORT events */
+	SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT);
+	SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+	/* GEN8 interrupt DE MISC events */
+	SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC);
+
+	/* PCH events */
+	SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH);
+	SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+	SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+	SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+	SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+
+	if (IS_BROADWELL(gvt->dev_priv)) {
+		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
+		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
+		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
+
+		SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+		SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+
+		SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+		SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+
+		SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+		SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+	} else if (IS_SKYLAKE(gvt->dev_priv)) {
+		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
+		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
+		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+		SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+		SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+		SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+	}
+
+	/* GEN8 interrupt PCU events */
+	SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU);
+	SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
+}
+
+static struct intel_gvt_irq_ops gen8_irq_ops = {
+	.init_irq = gen8_init_irq,
+	.check_pending_irq = gen8_check_pending_irq,
+};
+
+/**
+ * intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
+ * @vgpu: a vGPU
+ * @event: interrupt event
+ *
+ * This function is used to trigger a virtual interrupt event for a vGPU.
+ * The caller provides the event to be triggered; the framework itself
+ * emulates the IRQ register bit changes.
+ *
+ */
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+	enum intel_gvt_event_type event)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_irq *irq = &gvt->irq;
+	gvt_event_virt_handler_t handler;
+	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+
+	handler = get_event_virt_handler(irq, event);
+	WARN_ON(!handler);
+
+	handler(irq, event, vgpu);
+
+	ops->check_pending_irq(vgpu);
+}
+
+static void init_events(
+	struct intel_gvt_irq *irq)
+{
+	int i;
+
+	for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) {
+		irq->events[i].info = NULL;
+		irq->events[i].v_handler = handle_default_event_virt;
+	}
+}
+
+static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
+{
+	struct intel_gvt_vblank_timer *vblank_timer;
+	struct intel_gvt_irq *irq;
+	struct intel_gvt *gvt;
+
+	vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
+	irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
+	gvt = container_of(irq, struct intel_gvt, irq);
+
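+	/* ask the GVT service handler to emulate a vblank, then re-arm */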
+	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
+	hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
+	return HRTIMER_RESTART;
+}
+
+/**
+ * intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the
+ * GVT-g IRQ emulation subsystem.
+ *
+ */
+void intel_gvt_clean_irq(struct intel_gvt *gvt)
+{
+	struct intel_gvt_irq *irq = &gvt->irq;
+
+	hrtimer_cancel(&irq->vblank_timer.timer);
+}
+
+#define VBLANK_TIMER_PERIOD 16000000
+
+/**
+ * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at the driver loading stage to initialize the
+ * GVT-g IRQ emulation subsystem.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_irq(struct intel_gvt *gvt)
+{
+	struct intel_gvt_irq *irq = &gvt->irq;
+	struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
+
+	gvt_dbg_core("init irq framework\n");
+
+	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+		irq->ops = &gen8_irq_ops;
+		irq->irq_map = gen8_irq_map;
+	} else {
+		WARN_ON(1);
+		return -ENODEV;
+	}
+
+	/* common event initialization */
+	init_events(irq);
+
+	/* gen specific initialization */
+	irq->ops->init_irq(irq);
+
+	init_irq_map(irq);
+
+	hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	vblank_timer->timer.function = vblank_timer_fn;
+	vblank_timer->period = VBLANK_TIMER_PERIOD;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
new file mode 100644
index 0000000..5313fb1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ *    Min he <min.he@intel.com>
+ *
+ */
+
+#ifndef _GVT_INTERRUPT_H_
+#define _GVT_INTERRUPT_H_
+
+enum intel_gvt_event_type {
+	RCS_MI_USER_INTERRUPT = 0,
+	RCS_DEBUG,
+	RCS_MMIO_SYNC_FLUSH,
+	RCS_CMD_STREAMER_ERR,
+	RCS_PIPE_CONTROL,
+	RCS_L3_PARITY_ERR,
+	RCS_WATCHDOG_EXCEEDED,
+	RCS_PAGE_DIRECTORY_FAULT,
+	RCS_AS_CONTEXT_SWITCH,
+	RCS_MONITOR_BUFF_HALF_FULL,
+
+	VCS_MI_USER_INTERRUPT,
+	VCS_MMIO_SYNC_FLUSH,
+	VCS_CMD_STREAMER_ERR,
+	VCS_MI_FLUSH_DW,
+	VCS_WATCHDOG_EXCEEDED,
+	VCS_PAGE_DIRECTORY_FAULT,
+	VCS_AS_CONTEXT_SWITCH,
+
+	VCS2_MI_USER_INTERRUPT,
+	VCS2_MI_FLUSH_DW,
+	VCS2_AS_CONTEXT_SWITCH,
+
+	BCS_MI_USER_INTERRUPT,
+	BCS_MMIO_SYNC_FLUSH,
+	BCS_CMD_STREAMER_ERR,
+	BCS_MI_FLUSH_DW,
+	BCS_PAGE_DIRECTORY_FAULT,
+	BCS_AS_CONTEXT_SWITCH,
+
+	VECS_MI_USER_INTERRUPT,
+	VECS_MI_FLUSH_DW,
+	VECS_AS_CONTEXT_SWITCH,
+
+	PIPE_A_FIFO_UNDERRUN,
+	PIPE_B_FIFO_UNDERRUN,
+	PIPE_A_CRC_ERR,
+	PIPE_B_CRC_ERR,
+	PIPE_A_CRC_DONE,
+	PIPE_B_CRC_DONE,
+	PIPE_A_ODD_FIELD,
+	PIPE_B_ODD_FIELD,
+	PIPE_A_EVEN_FIELD,
+	PIPE_B_EVEN_FIELD,
+	PIPE_A_LINE_COMPARE,
+	PIPE_B_LINE_COMPARE,
+	PIPE_C_LINE_COMPARE,
+	PIPE_A_VBLANK,
+	PIPE_B_VBLANK,
+	PIPE_C_VBLANK,
+	PIPE_A_VSYNC,
+	PIPE_B_VSYNC,
+	PIPE_C_VSYNC,
+	PRIMARY_A_FLIP_DONE,
+	PRIMARY_B_FLIP_DONE,
+	PRIMARY_C_FLIP_DONE,
+	SPRITE_A_FLIP_DONE,
+	SPRITE_B_FLIP_DONE,
+	SPRITE_C_FLIP_DONE,
+
+	PCU_THERMAL,
+	PCU_PCODE2DRIVER_MAILBOX,
+
+	DPST_PHASE_IN,
+	DPST_HISTOGRAM,
+	GSE,
+	DP_A_HOTPLUG,
+	AUX_CHANNEL_A,
+	PERF_COUNTER,
+	POISON,
+	GTT_FAULT,
+	ERROR_INTERRUPT_COMBINED,
+
+	FDI_RX_INTERRUPTS_TRANSCODER_A,
+	AUDIO_CP_CHANGE_TRANSCODER_A,
+	AUDIO_CP_REQUEST_TRANSCODER_A,
+	FDI_RX_INTERRUPTS_TRANSCODER_B,
+	AUDIO_CP_CHANGE_TRANSCODER_B,
+	AUDIO_CP_REQUEST_TRANSCODER_B,
+	FDI_RX_INTERRUPTS_TRANSCODER_C,
+	AUDIO_CP_CHANGE_TRANSCODER_C,
+	AUDIO_CP_REQUEST_TRANSCODER_C,
+	ERR_AND_DBG,
+	GMBUS,
+	SDVO_B_HOTPLUG,
+	CRT_HOTPLUG,
+	DP_B_HOTPLUG,
+	DP_C_HOTPLUG,
+	DP_D_HOTPLUG,
+	AUX_CHANNEL_B,
+	AUX_CHANNEL_C,
+	AUX_CHANNEL_D,
+	AUDIO_POWER_STATE_CHANGE_B,
+	AUDIO_POWER_STATE_CHANGE_C,
+	AUDIO_POWER_STATE_CHANGE_D,
+
+	INTEL_GVT_EVENT_RESERVED,
+	INTEL_GVT_EVENT_MAX,
+};
+
+struct intel_gvt_irq;
+struct intel_gvt;
+
+typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
+	enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
+
+struct intel_gvt_irq_ops {
+	void (*init_irq)(struct intel_gvt_irq *irq);
+	void (*check_pending_irq)(struct intel_vgpu *vgpu);
+};
+
+/* the list of physical interrupt control register groups */
+enum intel_gvt_irq_type {
+	INTEL_GVT_IRQ_INFO_GT,
+	INTEL_GVT_IRQ_INFO_DPY,
+	INTEL_GVT_IRQ_INFO_PCH,
+	INTEL_GVT_IRQ_INFO_PM,
+
+	INTEL_GVT_IRQ_INFO_MASTER,
+	INTEL_GVT_IRQ_INFO_GT0,
+	INTEL_GVT_IRQ_INFO_GT1,
+	INTEL_GVT_IRQ_INFO_GT2,
+	INTEL_GVT_IRQ_INFO_GT3,
+	INTEL_GVT_IRQ_INFO_DE_PIPE_A,
+	INTEL_GVT_IRQ_INFO_DE_PIPE_B,
+	INTEL_GVT_IRQ_INFO_DE_PIPE_C,
+	INTEL_GVT_IRQ_INFO_DE_PORT,
+	INTEL_GVT_IRQ_INFO_DE_MISC,
+	INTEL_GVT_IRQ_INFO_AUD,
+	INTEL_GVT_IRQ_INFO_PCU,
+
+	INTEL_GVT_IRQ_INFO_MAX,
+};
+
+#define INTEL_GVT_IRQ_BITWIDTH	32
+
+/* device specific interrupt bit definitions */
+struct intel_gvt_irq_info {
+	char *name;
+	i915_reg_t reg_base;
+	enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
+	unsigned long warned;
+	int group;
+	DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
+	bool has_upstream_irq;
+};
+
+/* per-event information */
+struct intel_gvt_event_info {
+	int bit;				/* map to register bit */
+	int policy;				/* forwarding policy */
+	struct intel_gvt_irq_info *info;	/* register info */
+	gvt_event_virt_handler_t v_handler;	/* for v_event */
+};
+
+struct intel_gvt_irq_map {
+	int up_irq_group;
+	int up_irq_bit;
+	int down_irq_group;
+	u32 down_irq_bitmask;
+};
+
+struct intel_gvt_vblank_timer {
+	struct hrtimer timer;
+	u64 period;
+};
+
+/* structure containing device specific IRQ state */
+struct intel_gvt_irq {
+	struct intel_gvt_irq_ops *ops;
+	struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
+	DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
+	struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
+	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+	struct intel_gvt_irq_map *irq_map;
+	struct intel_gvt_vblank_timer vblank_timer;
+};
+
+int intel_gvt_init_irq(struct intel_gvt *gvt);
+void intel_gvt_clean_irq(struct intel_gvt *gvt);
+
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+	enum intel_gvt_event_type event);
+
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+	void *p_data, unsigned int bytes);
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+	unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+	unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+	unsigned int reg, void *p_data, unsigned int bytes);
+
+int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
+int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
+int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
+
+#endif /* _GVT_INTERRUPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
new file mode 100644
index 0000000..585b01f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Ke Yu
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Dexuan Cui
+ *
+ * Contributors:
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Min He <min.he@intel.com>
+ *    Niu Bing <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/**
+ * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ *
+ * Returns:
+ * The MMIO offset of the given GPA, relative to the GTTMMIO BAR base.
+ */
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
+{
+	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
+			  ~GENMASK(3, 0);
+	return gpa - gttmmio_gpa;
+}
+
+#define reg_is_mmio(gvt, reg)  \
+	(reg >= 0 && reg < gvt->device_info.mmio_size)
+
+#define reg_is_gtt(gvt, reg)   \
+	(reg >= gvt->device_info.gtt_start_offset \
+	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+
+/**
+ * intel_vgpu_emulate_mmio_read - emulate MMIO read
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
+		void *p_data, unsigned int bytes)
+{
+	struct intel_vgpu *vgpu = __vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_mmio_info *mmio;
+	unsigned int offset = 0;
+	int ret = -EINVAL;
+
+	mutex_lock(&gvt->lock);
+
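+	/*
+	 * Reads that hit a write-protected guest page (tracked in vgpu->gtt)
+	 * bypass MMIO emulation and are served from guest memory directly.
+	 */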
+	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+		struct intel_vgpu_guest_page *gp;
+
+		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+		if (gp) {
+			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
+					p_data, bytes);
+			if (ret) {
+				gvt_err("vgpu%d: guest page read error %d, "
+					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+					vgpu->id, ret,
+					gp->gfn, pa, *(u32 *)p_data, bytes);
+			}
+			mutex_unlock(&gvt->lock);
+			return ret;
+		}
+	}
+
+	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+	if (WARN_ON(bytes > 8))
+		goto err;
+
+	if (reg_is_gtt(gvt, offset)) {
+		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+			goto err;
+		if (WARN_ON(bytes != 4 && bytes != 8))
+			goto err;
+		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+			goto err;
+
+		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+				p_data, bytes);
+		if (ret)
+			goto err;
+		mutex_unlock(&gvt->lock);
+		return ret;
+	}
+
+	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
+		mutex_unlock(&gvt->lock);
+		return ret;
+	}
+
+	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+		goto err;
+
+	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+	if (!mmio && !vgpu->mmio.disable_warn_untrack) {
+		gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
+				vgpu->id, offset, bytes, *(u32 *)p_data);
+
+		if (offset == 0x206c) {
+			gvt_err("------------------------------------------\n");
+			gvt_err("vgpu%d: likely triggers a gfx reset\n",
+			vgpu->id);
+			gvt_err("------------------------------------------\n");
+			vgpu->mmio.disable_warn_untrack = true;
+		}
+	}
+
+	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+			goto err;
+	}
+
+	if (mmio) {
+		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+				goto err;
+			if (WARN_ON(mmio->offset != offset))
+				goto err;
+		}
+		ret = mmio->read(vgpu, offset, p_data, bytes);
+	} else
+		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+
+	if (ret)
+		goto err;
+
+	intel_gvt_mmio_set_accessed(gvt, offset);
+	mutex_unlock(&gvt->lock);
+	return 0;
+err:
+	gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
+			vgpu->id, offset, bytes);
+	mutex_unlock(&gvt->lock);
+	return ret;
+}
+
+/**
+ * intel_vgpu_emulate_mmio_write - emulate MMIO write
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
+		void *p_data, unsigned int bytes)
+{
+	struct intel_vgpu *vgpu = __vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_mmio_info *mmio;
+	unsigned int offset = 0;
+	u32 old_vreg = 0, old_sreg = 0;
+	int ret = -EINVAL;
+
+	mutex_lock(&gvt->lock);
+
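+	/*
+	 * Writes that hit a write-protected guest page are dispatched to the
+	 * handler registered for that page instead of MMIO emulation.
+	 */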
+	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+		struct intel_vgpu_guest_page *gp;
+
+		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+		if (gp) {
+			ret = gp->handler(gp, pa, p_data, bytes);
+			if (ret) {
+				gvt_err("vgpu%d: guest page write error %d, "
+					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+					vgpu->id, ret,
+					gp->gfn, pa, *(u32 *)p_data, bytes);
+			}
+			mutex_unlock(&gvt->lock);
+			return ret;
+		}
+	}
+
+	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+	if (WARN_ON(bytes > 8))
+		goto err;
+
+	if (reg_is_gtt(gvt, offset)) {
+		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+			goto err;
+		if (WARN_ON(bytes != 4 && bytes != 8))
+			goto err;
+		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+			goto err;
+
+		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+				p_data, bytes);
+		if (ret)
+			goto err;
+		mutex_unlock(&gvt->lock);
+		return ret;
+	}
+
+	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
+		mutex_unlock(&gvt->lock);
+		return ret;
+	}
+
+	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+	if (!mmio && !vgpu->mmio.disable_warn_untrack)
+		gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+				vgpu->id, offset, bytes, *(u32 *)p_data);
+
+	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+			goto err;
+	}
+
+	if (mmio) {
+		u64 ro_mask = mmio->ro_mask;
+
+		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+				goto err;
+			if (WARN_ON(mmio->offset != offset))
+				goto err;
+		}
+
+		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+			old_vreg = vgpu_vreg(vgpu, offset);
+			old_sreg = vgpu_sreg(vgpu, offset);
+		}
+
+		if (!ro_mask) {
+			ret = mmio->write(vgpu, offset, p_data, bytes);
+		} else {
+			/* Protect RO bits like HW */
+			u64 data = 0;
+
+			/* all register bits are RO. */
+			if (ro_mask == ~(u64)0) {
+				gvt_err("vgpu%d: try to write RO reg %x\n",
+						vgpu->id, offset);
+				ret = 0;
+				goto out;
+			}
+			/* keep the RO bits in the virtual register */
+			memcpy(&data, p_data, bytes);
+			data &= ~mmio->ro_mask;
+			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
+			ret = mmio->write(vgpu, offset, &data, bytes);
+		}
+
+		/* higher 16bits of mode ctl regs are mask bits for change */
+		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
+
+			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
+				| (vgpu_vreg(vgpu, offset) & mask);
+			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
+				| (vgpu_sreg(vgpu, offset) & mask);
+		}
+	} else
+		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+				bytes);
+	if (ret)
+		goto err;
+out:
+	intel_gvt_mmio_set_accessed(gvt, offset);
+	mutex_unlock(&gvt->lock);
+	return 0;
+err:
+	gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
+			vgpu->id, offset, bytes);
+	mutex_unlock(&gvt->lock);
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
new file mode 100644
index 0000000..9dc739a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Ke Yu
+ *    Kevin Tian <kevin.tian@intel.com>
+ *    Dexuan Cui
+ *
+ * Contributors:
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Min He <min.he@intel.com>
+ *    Niu Bing <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_MMIO_H_
+#define _GVT_MMIO_H_
+
+struct intel_gvt;
+struct intel_vgpu;
+
+#define D_SNB   (1 << 0)
+#define D_IVB   (1 << 1)
+#define D_HSW   (1 << 2)
+#define D_BDW   (1 << 3)
+#define D_SKL	(1 << 4)
+
+#define D_GEN9PLUS	(D_SKL)
+#define D_GEN8PLUS	(D_BDW | D_SKL)
+#define D_GEN75PLUS	(D_HSW | D_BDW | D_SKL)
+#define D_GEN7PLUS	(D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_SKL_PLUS	(D_SKL)
+#define D_BDW_PLUS	(D_BDW | D_SKL)
+#define D_HSW_PLUS	(D_HSW | D_BDW | D_SKL)
+#define D_IVB_PLUS	(D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_PRE_BDW	(D_SNB | D_IVB | D_HSW)
+#define D_PRE_SKL	(D_SNB | D_IVB | D_HSW | D_BDW)
+#define D_ALL		(D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+
+struct intel_gvt_mmio_info {
+	u32 offset;
+	u32 size;
+	u32 length;
+	u32 addr_mask;
+	u64 ro_mask;
+	u32 device;
+	int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+	int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+	u32 addr_range;
+	struct hlist_node node;
+};
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
+bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
+
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
+
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+						     unsigned int offset);
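+
+/* extract the raw u32 offset from a register handle (e.g. an i915_reg_t) */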
+#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
+	typeof(reg) __reg = reg; \
+	u32 *offset = (u32 *)&__reg; \
+	*offset; \
+})
+
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
+int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data,
+				 unsigned int bytes);
+int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data,
+				  unsigned int bytes);
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+				  unsigned int offset);
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+				     unsigned int offset);
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+				 void *p_data, unsigned int bytes);
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+				  void *p_data, unsigned int bytes);
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 03601e3..6785878 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -19,6 +19,15 @@
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
+ *
+ * Authors:
+ *    Eddie Dong <eddie.dong@intel.com>
+ *    Dexuan Cui
+ *    Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
  */
 
 #ifndef _GVT_MPT_H_
@@ -46,4 +55,215 @@ static inline int intel_gvt_hypervisor_detect_host(void)
 	return intel_gvt_host.mpt->detect_host();
 }
 
+/**
+ * intel_gvt_hypervisor_attach_vgpu - call the hypervisor to initialize
+ * vGPU-related state inside the hypervisor
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
+{
+	return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
+}
+
+/**
+ * intel_gvt_hypervisor_detach_vgpu - call the hypervisor to release
+ * vGPU-related state inside the hypervisor
+ * @vgpu: a vGPU
+ */
+static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
+{
+	intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+}
+
+#define MSI_CAP_CONTROL(offset) (offset + 2)
+#define MSI_CAP_ADDRESS(offset) (offset + 4)
+#define MSI_CAP_DATA(offset) (offset + 8)
+#define MSI_CAP_EN 0x1
+
+/**
+ * intel_gvt_hypervisor_inject_msi - inject an MSI interrupt into a vGPU
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
+{
+	unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+	u16 control, data;
+	u32 addr;
+	int ret;
+
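+	/*
+	 * Read the guest-programmed MSI control, address and data from the
+	 * virtual PCI configuration space.
+	 */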
+	control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
+	addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
+	data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
+
+	/* Do not generate an MSI if MSIEN is disabled */
+	if (!(control & MSI_CAP_EN))
+		return 0;
+
+	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
+		return -EINVAL;
+
+	gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
+		    data);
+
+	ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
+ * @p: host kernel virtual address
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
+{
+	return intel_gvt_host.mpt->from_virt_to_mfn(p);
+}
+
+/**
+ * intel_gvt_hypervisor_set_wp_page - write-protect a guest page
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *p)
+{
+	int ret;
+
+	if (p->writeprotection)
+		return 0;
+
+	ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
+	if (ret)
+		return ret;
+	p->writeprotection = true;
+	atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
+	return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
+ * guest page
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *p)
+{
+	int ret;
+
+	if (!p->writeprotection)
+		return 0;
+
+	ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
+	if (ret)
+		return ret;
+	p->writeprotection = false;
+	atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
+	return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
+		unsigned long gpa, void *buf, unsigned long len)
+{
+	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
+		unsigned long gpa, void *buf, unsigned long len)
+{
+	return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
+		struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
+}
+
+enum {
+	GVT_MAP_APERTURE = 0,
+	GVT_MAP_OPREGION,
+};
+
+/**
+ * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ * @mfn: host PFN
+ * @nr: amount of PFNs
+ * @map: map or unmap
+ * @type: map type
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
+		struct intel_vgpu *vgpu, unsigned long gfn,
+		unsigned long mfn, unsigned int nr,
+		bool map, int type)
+{
+	return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
+						  map, type);
+}
+
+/**
+ * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
+ * @vgpu: a vGPU
+ * @start: the beginning of the guest physical address region
+ * @end: the end of the guest physical address region
+ * @map: map or unmap
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_trap_area(
+		struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
+{
+	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
+}
+
 #endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
new file mode 100644
index 0000000..9521891
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+	void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
+	u8 *buf;
+	int i;
+
+	if (WARN((vgpu_opregion(vgpu)->va),
+			"vgpu%d: opregion has been initialized already.\n",
+			vgpu->id))
+		return -EINVAL;
+
+	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
+			GFP_DMA32 | __GFP_ZERO,
+			INTEL_GVT_OPREGION_PORDER);
+
+	if (!vgpu_opregion(vgpu)->va)
+		return -ENOMEM;
+
+	memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
+			INTEL_GVT_OPREGION_SIZE);
+
+	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+		vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+
+	/* For unknown reasons, the value in the LID field is incorrect,
+	 * which blocks the Windows guest, so work around it by forcing
+	 * it to "OPEN".
+	 */
+	buf = (u8 *)vgpu_opregion(vgpu)->va;
+	buf[INTEL_GVT_OPREGION_CLID] = 0x3;
+
+	return 0;
+}
+
+static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
+{
+	u64 mfn;
+	int i, ret;
+
+	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
+		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+			+ i * PAGE_SIZE);
+		if (mfn == INTEL_GVT_INVALID_ADDR) {
+			gvt_err("fail to get MFN from VA\n");
+			return -EINVAL;
+		}
+		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
+				vgpu_opregion(vgpu)->gfn[i],
+				mfn, 1, map, GVT_MAP_OPREGION);
+		if (ret) {
+			gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * intel_vgpu_clean_opregion - clean up the state used to emulate the OpRegion
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
+{
+	int i;
+
+	gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
+
+	if (!vgpu_opregion(vgpu)->va)
+		return;
+
+	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+		vunmap(vgpu_opregion(vgpu)->va);
+		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
+			if (vgpu_opregion(vgpu)->pages[i]) {
+				put_page(vgpu_opregion(vgpu)->pages[i]);
+				vgpu_opregion(vgpu)->pages[i] = NULL;
+			}
+		}
+	} else {
+		map_vgpu_opregion(vgpu, false);
+		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+				INTEL_GVT_OPREGION_PORDER);
+	}
+
+	vgpu_opregion(vgpu)->va = NULL;
+}
+
+/**
+ * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+	int ret;
+
+	gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+
+	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+		gvt_dbg_core("emulate opregion from kernel\n");
+
+		ret = init_vgpu_opregion(vgpu, gpa);
+		if (ret)
+			return ret;
+
+		ret = map_vgpu_opregion(vgpu, true);
+		if (ret)
+			return ret;
+	} else {
+		gvt_dbg_core("emulate opregion from userspace\n");
+
+		/*
+		 * If the opregion pages are not allocated from the host
+		 * kernel, most of the parameters are meaningless
+		 */
+		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
+				0, /* not used */
+				0, /* not used */
+				2, /* not used */
+				1,
+				GVT_MAP_OPREGION);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ * intel_gvt_clean_opregion - clean host opregion related stuff
+ * @gvt: a GVT device
+ *
+ */
+void intel_gvt_clean_opregion(struct intel_gvt *gvt)
+{
+	memunmap(gvt->opregion.opregion_va);
+	gvt->opregion.opregion_va = NULL;
+}
+
+/**
+ * intel_gvt_init_opregion - initialize host opregion related stuff
+ * @gvt: a GVT device
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_opregion(struct intel_gvt *gvt)
+{
+	gvt_dbg_core("init host opregion\n");
+
+	pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
+			&gvt->opregion.opregion_pa);
+
+	gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
+					     INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
+	if (!gvt->opregion.opregion_va) {
+		gvt_err("fail to map host opregion\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+#define GVT_OPREGION_FUNC(scic)					\
+	({							\
+	 u32 __ret;						\
+	 __ret = (scic & OPREGION_SCIC_FUNC_MASK) >>		\
+	 OPREGION_SCIC_FUNC_SHIFT;				\
+	 __ret;							\
+	 })
+
+#define GVT_OPREGION_SUBFUNC(scic)				\
+	({							\
+	 u32 __ret;						\
+	 __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >>		\
+	 OPREGION_SCIC_SUBFUNC_SHIFT;				\
+	 __ret;							\
+	 })
+
+static const char *opregion_func_name(u32 func)
+{
+	const char *name = NULL;
+
+	switch (func) {
+	case 0 ... 3:
+	case 5:
+	case 7 ... 15:
+		name = "Reserved";
+		break;
+
+	case 4:
+		name = "Get BIOS Data";
+		break;
+
+	case 6:
+		name = "System BIOS Callbacks";
+		break;
+
+	default:
+		name = "Unknown";
+		break;
+	}
+	return name;
+}
+
+static const char *opregion_subfunc_name(u32 subfunc)
+{
+	const char *name = NULL;
+
+	switch (subfunc) {
+	case 0:
+		name = "Supported Calls";
+		break;
+
+	case 1:
+		name = "Requested Callbacks";
+		break;
+
+	case 2 ... 3:
+	case 8 ... 9:
+		name = "Reserved";
+		break;
+
+	case 5:
+		name = "Boot Display";
+		break;
+
+	case 6:
+		name = "TV-Standard/Video-Connector";
+		break;
+
+	case 7:
+		name = "Internal Graphics";
+		break;
+
+	case 10:
+		name = "Spread Spectrum Clocks";
+		break;
+
+	case 11:
+		name = "Get AKSV";
+		break;
+
+	default:
+		name = "Unknown";
+		break;
+	}
+	return name;
+}
+
+static bool querying_capabilities(u32 scic)
+{
+	u32 func, subfunc;
+
+	func = GVT_OPREGION_FUNC(scic);
+	subfunc = GVT_OPREGION_SUBFUNC(scic);
+
+	if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+		subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPORTEDCALLS)
+		|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+		 subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQUESTEDCALLBACKS)
+		|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
+		 subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPORTEDCALLS)) {
+		return true;
+	}
+	return false;
+}
+
+/**
+ * intel_vgpu_emulate_opregion_request - emulate an OpRegion request
+ * @vgpu: a vGPU
+ * @swsci: SWSCI request
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
+{
+	u32 *scic, *parm;
+	u32 func, subfunc;
+
+	scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
+	parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+
+	if (!(swsci & SWSCI_SCI_SELECT)) {
+		gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+		return 0;
+	}
+	/* ignore non 0->1 transitions */
+	if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
+				& SWSCI_SCI_TRIGGER) ||
+			!(swsci & SWSCI_SCI_TRIGGER)) {
+		return 0;
+	}
+
+	func = GVT_OPREGION_FUNC(*scic);
+	subfunc = GVT_OPREGION_SUBFUNC(*scic);
+	if (!querying_capabilities(*scic)) {
+		gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+				" subfunc \"%s\"\n",
+				vgpu->id,
+				opregion_func_name(func),
+				opregion_subfunc_name(subfunc));
+		/*
+		 * emulate exit status of function call, '0' means
+		 * "failure, generic, unsupported or unknown cause"
+		 */
+		*scic &= ~OPREGION_SCIC_EXIT_MASK;
+		return 0;
+	}
+
+	*scic = 0;
+	*parm = 0;
+	return 0;
+}
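
The SWSCI emulation above completes only capability queries ("Get BIOS Data" or
"System BIOS Callbacks" paired with the "Supported Calls"/"Requested Callbacks"
sub-functions); any runtime service request is failed by clearing the SCIC exit
field. A minimal stand-alone sketch of the function/sub-function decode,
reusing the mask and shift values that reg.h defines below (user-space code,
for illustration only):

	#include <assert.h>
	#include <stdint.h>

	/* same decode as GVT_OPREGION_FUNC()/GVT_OPREGION_SUBFUNC() */
	#define FUNC_MASK	0x1E
	#define FUNC_SHIFT	1
	#define SUBFUNC_MASK	0xFF00
	#define SUBFUNC_SHIFT	8

	int main(void)
	{
		/* func 4 ("Get BIOS Data"), subfunc 0 ("Supported Calls") */
		uint32_t scic = (4u << FUNC_SHIFT) | (0u << SUBFUNC_SHIFT);

		assert(((scic & FUNC_MASK) >> FUNC_SHIFT) == 4);
		assert(((scic & SUBFUNC_MASK) >> SUBFUNC_SHIFT) == 0);
		return 0;
	}
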
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
new file mode 100644
index 0000000..0dfe789
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_REG_H
+#define _GVT_REG_H
+
+#define INTEL_GVT_PCI_CLASS_VGA_OTHER   0x80
+
+#define INTEL_GVT_PCI_GMCH_CONTROL	0x50
+#define   BDW_GMCH_GMS_SHIFT		8
+#define   BDW_GMCH_GMS_MASK		0xff
+
+#define INTEL_GVT_PCI_SWSCI		0xe8
+#define   SWSCI_SCI_SELECT		(1 << 15)
+#define   SWSCI_SCI_TRIGGER		1
+
+#define INTEL_GVT_PCI_OPREGION		0xfc
+
+#define INTEL_GVT_OPREGION_CLID		0x1AC
+#define INTEL_GVT_OPREGION_SCIC		0x200
+#define   OPREGION_SCIC_FUNC_MASK	0x1E
+#define   OPREGION_SCIC_FUNC_SHIFT	1
+#define   OPREGION_SCIC_SUBFUNC_MASK	0xFF00
+#define   OPREGION_SCIC_SUBFUNC_SHIFT	8
+#define   OPREGION_SCIC_EXIT_MASK	0xE0
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA         4
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS    6
+#define INTEL_GVT_OPREGION_SCIC_SF_SUPPORTEDCALLS     0
+#define INTEL_GVT_OPREGION_SCIC_SF_REQUESTEDCALLBACKS 1
+#define INTEL_GVT_OPREGION_PARM                   0x204
+
+#define INTEL_GVT_OPREGION_PAGES	2
+#define INTEL_GVT_OPREGION_PORDER	1
+#define INTEL_GVT_OPREGION_SIZE		(2 * 4096)
+
+#define VGT_SPRSTRIDE(pipe)	_PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
+
+#define _REG_VECS_EXCC		0x1A028
+#define _REG_VCS2_EXCC		0x1c028
+
+#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
+#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
+
+#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
+		((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+
+#define FORCEWAKE_RENDER_GEN9_REG 0xa278
+#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
+#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
+#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
+#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
+#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
+#define FORCEWAKE_ACK_HSW_REG 0x130044
+
+#define RB_HEAD_OFF_MASK	((1U << 21) - (1U << 2))
+#define RB_TAIL_OFF_MASK	((1U << 21) - (1U << 3))
+#define RB_TAIL_SIZE_MASK	((1U << 21) - (1U << 12))
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
+
+#endif
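
GFX_MODE_BIT_SET_IN_MASK() above encodes the GEN masked-register convention:
the upper 16 bits of a written value select which of the lower 16 bits actually
latch in hardware. A small stand-alone check of that convention (the macro is
copied locally; this is not i915 API):

	#include <assert.h>
	#include <stdint.h>

	/* "bit" must live in the low 16 bits, and its mirror in the high
	 * 16 bits must be set for the write to take effect */
	#define BIT_SET_IN_MASK(val, bit) \
		((((bit) & 0xffff0000) == 0) && !!((val) & ((bit) << 16)))

	int main(void)
	{
		uint32_t v = (1u << 16) | 1u;	/* arm bit 0 and set it */

		assert(BIT_SET_IN_MASK(v, 1));	/* bit 0 write is armed */
		assert(!BIT_SET_IN_MASK(v, 2));	/* bit 1 write is not */
		return 0;
	}
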
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
new file mode 100644
index 0000000..44136b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eddie Dong <eddie.dong@intel.com>
+ *    Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *    Changbin Du <changbin.du@intel.com>
+ *    Zhenyu Wang <zhenyuw@linux.intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+struct render_mmio {
+	int ring_id;
+	i915_reg_t reg;
+	u32 mask;
+	bool in_context;
+	u32 value;
+};
+
+static struct render_mmio gen8_render_mmio_list[] = {
+	{RCS, _MMIO(0x229c), 0xffff, false},
+	{RCS, _MMIO(0x2248), 0x0, false},
+	{RCS, _MMIO(0x2098), 0x0, false},
+	{RCS, _MMIO(0x20c0), 0xffff, true},
+	{RCS, _MMIO(0x24d0), 0, false},
+	{RCS, _MMIO(0x24d4), 0, false},
+	{RCS, _MMIO(0x24d8), 0, false},
+	{RCS, _MMIO(0x24dc), 0, false},
+	{RCS, _MMIO(0x7004), 0xffff, true},
+	{RCS, _MMIO(0x7008), 0xffff, true},
+	{RCS, _MMIO(0x7000), 0xffff, true},
+	{RCS, _MMIO(0x7010), 0xffff, true},
+	{RCS, _MMIO(0x7300), 0xffff, true},
+	{RCS, _MMIO(0x83a4), 0xffff, true},
+
+	{BCS, _MMIO(0x2229c), 0xffff, false},
+	{BCS, _MMIO(0x2209c), 0xffff, false},
+	{BCS, _MMIO(0x220c0), 0xffff, false},
+	{BCS, _MMIO(0x22098), 0x0, false},
+	{BCS, _MMIO(0x22028), 0x0, false},
+};
+
+static struct render_mmio gen9_render_mmio_list[] = {
+	{RCS, _MMIO(0x229c), 0xffff, false},
+	{RCS, _MMIO(0x2248), 0x0, false},
+	{RCS, _MMIO(0x2098), 0x0, false},
+	{RCS, _MMIO(0x20c0), 0xffff, true},
+	{RCS, _MMIO(0x24d0), 0, false},
+	{RCS, _MMIO(0x24d4), 0, false},
+	{RCS, _MMIO(0x24d8), 0, false},
+	{RCS, _MMIO(0x24dc), 0, false},
+	{RCS, _MMIO(0x7004), 0xffff, true},
+	{RCS, _MMIO(0x7008), 0xffff, true},
+	{RCS, _MMIO(0x7000), 0xffff, true},
+	{RCS, _MMIO(0x7010), 0xffff, true},
+	{RCS, _MMIO(0x7300), 0xffff, true},
+	{RCS, _MMIO(0x83a4), 0xffff, true},
+
+	{RCS, _MMIO(0x40e0), 0, false},
+	{RCS, _MMIO(0x40e4), 0, false},
+	{RCS, _MMIO(0x2580), 0xffff, true},
+	{RCS, _MMIO(0x7014), 0xffff, true},
+	{RCS, _MMIO(0x20ec), 0xffff, false},
+	{RCS, _MMIO(0xb118), 0, false},
+	{RCS, _MMIO(0xe100), 0xffff, true},
+	{RCS, _MMIO(0xe180), 0xffff, true},
+	{RCS, _MMIO(0xe184), 0xffff, true},
+	{RCS, _MMIO(0xe188), 0xffff, true},
+	{RCS, _MMIO(0xe194), 0xffff, true},
+	{RCS, _MMIO(0x4de0), 0, false},
+	{RCS, _MMIO(0x4de4), 0, false},
+	{RCS, _MMIO(0x4de8), 0, false},
+	{RCS, _MMIO(0x4dec), 0, false},
+	{RCS, _MMIO(0x4df0), 0, false},
+	{RCS, _MMIO(0x4df4), 0, false},
+
+	{BCS, _MMIO(0x2229c), 0xffff, false},
+	{BCS, _MMIO(0x2209c), 0xffff, false},
+	{BCS, _MMIO(0x220c0), 0xffff, false},
+	{BCS, _MMIO(0x22098), 0x0, false},
+	{BCS, _MMIO(0x22028), 0x0, false},
+
+	{VCS2, _MMIO(0x1c028), 0xffff, false},
+
+	{VECS, _MMIO(0x1a028), 0xffff, false},
+};
+
+static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
+static u32 gen9_render_mocs_L3[32];
+
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	enum forcewake_domains fw;
+	i915_reg_t reg;
+	u32 regs[] = {
+		[RCS] = 0x4260,
+		[VCS] = 0x4264,
+		[VCS2] = 0x4268,
+		[BCS] = 0x426c,
+		[VECS] = 0x4270,
+	};
+
+	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+		return;
+
+	if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
+		return;
+
+	reg = _MMIO(regs[ring_id]);
+
+	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+	 * we need to hold a forcewake while invalidating RCS TLB caches,
+	 * otherwise the device can enter RC6 and interrupt the
+	 * invalidation process
+	 */
+	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+					    FW_REG_READ | FW_REG_WRITE);
+	if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+		fw |= FORCEWAKE_RENDER;
+
+	intel_uncore_forcewake_get(dev_priv, fw);
+
+	I915_WRITE_FW(reg, 0x1);
+
+	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+		gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+	else
+		vgpu_vreg(vgpu, regs[ring_id]) = 0;
+
+	intel_uncore_forcewake_put(dev_priv, fw);
+
+	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+}
+
+static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	i915_reg_t offset, l3_offset;
+	u32 regs[] = {
+		[RCS] = 0xc800,
+		[VCS] = 0xc900,
+		[VCS2] = 0xca00,
+		[BCS] = 0xcc00,
+		[VECS] = 0xcb00,
+	};
+	int i;
+
+	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+		return;
+
+	if (!IS_SKYLAKE(dev_priv))
+		return;
+
+	offset.reg = regs[ring_id];
+	for (i = 0; i < 64; i++) {
+		gen9_render_mocs[ring_id][i] = I915_READ(offset);
+		I915_WRITE(offset, vgpu_vreg(vgpu, offset));
+		POSTING_READ(offset);
+		offset.reg += 4;
+	}
+
+	if (ring_id == RCS) {
+		l3_offset.reg = 0xb020;
+		for (i = 0; i < 32; i++) {
+			gen9_render_mocs_L3[i] = I915_READ(l3_offset);
+			I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
+			POSTING_READ(l3_offset);
+			l3_offset.reg += 4;
+		}
+	}
+}
+
+static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	i915_reg_t offset, l3_offset;
+	u32 regs[] = {
+		[RCS] = 0xc800,
+		[VCS] = 0xc900,
+		[VCS2] = 0xca00,
+		[BCS] = 0xcc00,
+		[VECS] = 0xcb00,
+	};
+	int i;
+
+	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+		return;
+
+	if (!IS_SKYLAKE(dev_priv))
+		return;
+
+	offset.reg = regs[ring_id];
+	for (i = 0; i < 64; i++) {
+		vgpu_vreg(vgpu, offset) = I915_READ(offset);
+		I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
+		POSTING_READ(offset);
+		offset.reg += 4;
+	}
+
+	if (ring_id == RCS) {
+		l3_offset.reg = 0xb020;
+		for (i = 0; i < 32; i++) {
+			vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
+			I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
+			POSTING_READ(l3_offset);
+			l3_offset.reg += 4;
+		}
+	}
+}
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct render_mmio *mmio;
+	u32 v;
+	int i, array_size;
+
+	if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+		mmio = gen9_render_mmio_list;
+		array_size = ARRAY_SIZE(gen9_render_mmio_list);
+		load_mocs(vgpu, ring_id);
+	} else {
+		mmio = gen8_render_mmio_list;
+		array_size = ARRAY_SIZE(gen8_render_mmio_list);
+	}
+
+	for (i = 0; i < array_size; i++, mmio++) {
+		if (mmio->ring_id != ring_id)
+			continue;
+
+		mmio->value = I915_READ(mmio->reg);
+		if (mmio->mask)
+			v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
+		else
+			v = vgpu_vreg(vgpu, mmio->reg);
+
+		I915_WRITE(mmio->reg, v);
+		POSTING_READ(mmio->reg);
+
+		gvt_dbg_render("load reg %x old %x new %x\n",
+				i915_mmio_reg_offset(mmio->reg),
+				mmio->value, v);
+	}
+	handle_tlb_pending_event(vgpu, ring_id);
+}
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct render_mmio *mmio;
+	u32 v;
+	int i, array_size;
+
+	if (IS_SKYLAKE(dev_priv)) {
+		mmio = gen9_render_mmio_list;
+		array_size = ARRAY_SIZE(gen9_render_mmio_list);
+		restore_mocs(vgpu, ring_id);
+	} else {
+		mmio = gen8_render_mmio_list;
+		array_size = ARRAY_SIZE(gen8_render_mmio_list);
+	}
+
+	for (i = 0; i < array_size; i++, mmio++) {
+		if (mmio->ring_id != ring_id)
+			continue;
+
+		vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+
+		if (mmio->mask) {
+			vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
+			v = mmio->value | (mmio->mask << 16);
+		} else
+			v = mmio->value;
+
+		I915_WRITE(mmio->reg, v);
+		POSTING_READ(mmio->reg);
+
+		gvt_dbg_render("restore reg %x old %x new %x\n",
+				i915_mmio_reg_offset(mmio->reg),
+				mmio->value, v);
+	}
+}
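
intel_gvt_load_render_mmio() and intel_gvt_restore_render_mmio() form a
save-and-swap pair around a vGPU's time slice: host values are saved and the
guest's vregs written on schedule-in, then the guest view is snapshotted and
host values put back on schedule-out, with mask << 16 arming the write-enable
bits of masked registers. A hedged, self-contained sketch of that pattern
(fake_mmio/read_reg/write_reg are stand-ins, not i915 API):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_mmio[0x100];	/* stand-in register file */

	static uint32_t read_reg(uint32_t reg)		{ return fake_mmio[reg]; }
	static void write_reg(uint32_t reg, uint32_t v)	{ fake_mmio[reg] = v; }

	struct reg_entry {
		uint32_t reg;	/* register index */
		uint32_t mask;	/* non-zero for masked registers */
		uint32_t saved;	/* host value captured at load time */
	};

	/* schedule-in: save host values, make the guest's vregs live */
	static void load_guest_regs(struct reg_entry *e, int n,
				    const uint32_t *vreg)
	{
		for (int i = 0; i < n; i++) {
			e[i].saved = read_reg(e[i].reg);
			/* masked registers only latch bits armed in the
			 * upper 16 bits, hence the mask << 16 */
			write_reg(e[i].reg, e[i].mask ?
				  vreg[i] | (e[i].mask << 16) : vreg[i]);
		}
	}

	/* schedule-out: snapshot the guest view, restore host values */
	static void restore_host_regs(struct reg_entry *e, int n,
				      uint32_t *vreg)
	{
		for (int i = 0; i < n; i++) {
			vreg[i] = read_reg(e[i].reg);
			write_reg(e[i].reg, e[i].mask ?
				  e[i].saved | (e[i].mask << 16) : e[i].saved);
		}
	}

	int main(void)
	{
		struct reg_entry e[] = { { .reg = 1, .mask = 0xffff } };
		uint32_t vreg[] = { 0x1 };

		fake_mmio[1] = 0x2;		/* pretend host value */
		load_guest_regs(e, 1, vreg);
		printf("live %#x saved %#x\n", read_reg(1), e[0].saved);
		restore_host_regs(e, 1, vreg);
		printf("live %#x vreg %#x\n", read_reg(1), vreg[0]);
		return 0;
	}
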
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
new file mode 100644
index 0000000..dac1a3c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eddie Dong <eddie.dong@intel.com>
+ *    Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *    Changbin Du <changbin.du@intel.com>
+ *    Zhenyu Wang <zhenyuw@linux.intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef __GVT_RENDER_H__
+#define __GVT_RENDER_H__
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
new file mode 100644
index 0000000..678b0be
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Anhua Xu
+ *    Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
+{
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+		if (!list_empty(workload_q_head(vgpu, i)))
+			return true;
+	}
+
+	return false;
+}
+
+static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
+{
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
+
+	/* no target to schedule */
+	if (!scheduler->next_vgpu)
+		return;
+
+	gvt_dbg_sched("try to schedule next vgpu %d\n",
+			scheduler->next_vgpu->id);
+
+	/*
+	 * after the flag is set, the workload dispatch thread will
+	 * stop dispatching workloads for the current vgpu
+	 */
+	scheduler->need_reschedule = true;
+
+	/* still have uncompleted workload? */
+	for_each_engine(engine, gvt->dev_priv, i) {
+		if (scheduler->current_workload[i]) {
+			gvt_dbg_sched("still have running workload\n");
+			return;
+		}
+	}
+
+	gvt_dbg_sched("switch to next vgpu %d\n",
+			scheduler->next_vgpu->id);
+
+	/* switch current vgpu */
+	scheduler->current_vgpu = scheduler->next_vgpu;
+	scheduler->next_vgpu = NULL;
+
+	scheduler->need_reschedule = false;
+
+	/* wake up workload dispatch thread */
+	for_each_engine(engine, gvt->dev_priv, i)
+		wake_up(&scheduler->waitq[i]);
+}
+
+struct tbs_vgpu_data {
+	struct list_head list;
+	struct intel_vgpu *vgpu;
+	/* put some per-vgpu sched stats here */
+};
+
+struct tbs_sched_data {
+	struct intel_gvt *gvt;
+	struct delayed_work work;
+	unsigned long period;
+	struct list_head runq_head;
+};
+
+#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+
+static void tbs_sched_func(struct work_struct *work)
+{
+	struct tbs_sched_data *sched_data = container_of(work,
+			struct tbs_sched_data, work.work);
+	struct tbs_vgpu_data *vgpu_data;
+
+	struct intel_gvt *gvt = sched_data->gvt;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+	struct intel_vgpu *vgpu = NULL;
+	struct list_head *pos, *head;
+
+	mutex_lock(&gvt->lock);
+
+	/* no vgpu, or a target has already been chosen */
+	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+		goto out;
+
+	if (scheduler->current_vgpu) {
+		vgpu_data = scheduler->current_vgpu->sched_data;
+		head = &vgpu_data->list;
+	} else {
+		gvt_dbg_sched("no current vgpu search from q head\n");
+		head = &sched_data->runq_head;
+	}
+
+	/* search a vgpu with pending workload */
+	list_for_each(pos, head) {
+		if (pos == &sched_data->runq_head)
+			continue;
+
+		vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
+			continue;
+
+		vgpu = vgpu_data->vgpu;
+		break;
+	}
+
+	if (vgpu) {
+		scheduler->next_vgpu = vgpu;
+		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+	}
+out:
+	if (scheduler->next_vgpu) {
+		gvt_dbg_sched("try to schedule next vgpu %d\n",
+				scheduler->next_vgpu->id);
+		try_to_schedule_next_vgpu(gvt);
+	}
+
+	/*
+	 * still have vgpus on the runq, or the last schedule hasn't
+	 * finished due to a running workload
+	 */
+	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+		schedule_delayed_work(&sched_data->work, sched_data->period);
+
+	mutex_unlock(&gvt->lock);
+}
+
+static int tbs_sched_init(struct intel_gvt *gvt)
+{
+	struct intel_gvt_workload_scheduler *scheduler =
+		&gvt->scheduler;
+
+	struct tbs_sched_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&data->runq_head);
+	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+	data->period = GVT_DEFAULT_TIME_SLICE;
+	data->gvt = gvt;
+
+	scheduler->sched_data = data;
+	return 0;
+}
+
+static void tbs_sched_clean(struct intel_gvt *gvt)
+{
+	struct intel_gvt_workload_scheduler *scheduler =
+		&gvt->scheduler;
+	struct tbs_sched_data *data = scheduler->sched_data;
+
+	cancel_delayed_work(&data->work);
+	kfree(data);
+	scheduler->sched_data = NULL;
+}
+
+static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
+{
+	struct tbs_vgpu_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->vgpu = vgpu;
+	INIT_LIST_HEAD(&data->list);
+
+	vgpu->sched_data = data;
+	return 0;
+}
+
+static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
+{
+	kfree(vgpu->sched_data);
+	vgpu->sched_data = NULL;
+}
+
+static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
+{
+	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+	if (!list_empty(&vgpu_data->list))
+		return;
+
+	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
+	schedule_delayed_work(&sched_data->work, sched_data->period);
+}
+
+static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
+{
+	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+	list_del_init(&vgpu_data->list);
+}
+
+static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
+	.init = tbs_sched_init,
+	.clean = tbs_sched_clean,
+	.init_vgpu = tbs_sched_init_vgpu,
+	.clean_vgpu = tbs_sched_clean_vgpu,
+	.start_schedule = tbs_sched_start_schedule,
+	.stop_schedule = tbs_sched_stop_schedule,
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
+{
+	gvt->scheduler.sched_ops = &tbs_schedule_ops;
+
+	return gvt->scheduler.sched_ops->init(gvt);
+}
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
+{
+	gvt->scheduler.sched_ops->clean(gvt);
+}
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
+{
+	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+}
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
+{
+	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+}
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
+{
+	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+
+	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+}
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt_workload_scheduler *scheduler =
+		&vgpu->gvt->scheduler;
+
+	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+
+	scheduler->sched_ops->stop_schedule(vgpu);
+
+	if (scheduler->next_vgpu == vgpu)
+		scheduler->next_vgpu = NULL;
+
+	if (scheduler->current_vgpu == vgpu) {
+		/* stop workload dispatching */
+		scheduler->need_reschedule = true;
+		scheduler->current_vgpu = NULL;
+	}
+}
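
GVT_DEFAULT_TIME_SLICE above is (1 * HZ / 1000) jiffies, i.e. roughly one
millisecond. tbs_sched_func() re-arms itself at that period and restarts the
runqueue walk just after the current vGPU, so vGPUs with pending workloads are
picked round-robin. A self-contained sketch of that pick-next walk (simplified
circular list, not the kernel list API):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct node {
		struct node *next;	/* circular list with a sentinel */
		bool is_head;		/* the runqueue sentinel itself */
		bool has_work;
		int id;
	};

	/* start just after "cur" (or the head when idle) so every
	 * runnable node eventually gets a turn, as in tbs_sched_func() */
	static struct node *pick_next(struct node *head, struct node *cur)
	{
		struct node *start = cur ? cur : head;
		struct node *pos;

		for (pos = start->next; pos != start; pos = pos->next) {
			if (pos->is_head)	/* skip the sentinel */
				continue;
			if (pos->has_work)
				return pos;
		}
		return NULL;	/* nothing runnable */
	}

	int main(void)
	{
		struct node head = { .is_head = true };
		struct node a = { .id = 1, .has_work = false };
		struct node b = { .id = 2, .has_work = true };

		head.next = &a; a.next = &b; b.next = &head;
		printf("next vgpu: %d\n", pick_next(&head, &a)->id); /* 2 */
		return 0;
	}
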
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
new file mode 100644
index 0000000..bb8b909
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Anhua Xu
+ *    Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef __GVT_SCHED_POLICY__
+#define __GVT_SCHED_POLICY__
+
+struct intel_gvt_sched_policy_ops {
+	int (*init)(struct intel_gvt *gvt);
+	void (*clean)(struct intel_gvt *gvt);
+	int (*init_vgpu)(struct intel_vgpu *vgpu);
+	void (*clean_vgpu)(struct intel_vgpu *vgpu);
+	void (*start_schedule)(struct intel_vgpu *vgpu);
+	void (*stop_schedule)(struct intel_vgpu *vgpu);
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
+
+#endif
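
struct intel_gvt_sched_policy_ops above is the seam for alternative policies:
only the time-based policy exists in this series, and a new policy only has to
populate the six hooks. A hypothetical skeleton, assuming the gvt.h types (the
noop_* names are illustrative, not part of this patch):

	/* hypothetical policy that accepts every vgpu and never
	 * reschedules; shown only to illustrate the ops wiring */
	static int noop_sched_init(struct intel_gvt *gvt) { return 0; }
	static void noop_sched_clean(struct intel_gvt *gvt) { }
	static int noop_sched_init_vgpu(struct intel_vgpu *vgpu) { return 0; }
	static void noop_sched_clean_vgpu(struct intel_vgpu *vgpu) { }
	static void noop_sched_start(struct intel_vgpu *vgpu) { }
	static void noop_sched_stop(struct intel_vgpu *vgpu) { }

	static struct intel_gvt_sched_policy_ops noop_schedule_ops = {
		.init		= noop_sched_init,
		.clean		= noop_sched_clean,
		.init_vgpu	= noop_sched_init_vgpu,
		.clean_vgpu	= noop_sched_clean_vgpu,
		.start_schedule	= noop_sched_start,
		.stop_schedule	= noop_sched_stop,
	};
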
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
new file mode 100644
index 0000000..843a5de
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Changbin Du <changbin.du@intel.com>
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define RING_CTX_OFF(x) \
+	offsetof(struct execlist_ring_context, x)
+
+static void set_context_pdp_root_pointer(
+		struct execlist_ring_context *ring_context,
+		u32 pdp[8])
+{
+	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		pdp_pair[i].val = pdp[7 - i];
+}
+
+static int populate_shadow_context(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+	void *dst;
+	unsigned long context_gpa, context_page_num;
+	int i;
+
+	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+			workload->ctx_desc.lrca);
+
+	context_page_num = intel_lr_context_size(
+			gvt->dev_priv->engine[ring_id]);
+
+	context_page_num = context_page_num >> PAGE_SHIFT;
+
+	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+		context_page_num = 19;
+
+	i = 2;
+
+	while (i < context_page_num) {
+		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+				(u32)((workload->ctx_desc.lrca + i) <<
+				GTT_PAGE_SHIFT));
+		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+			gvt_err("Invalid guest context descriptor\n");
+			return -EINVAL;
+		}
+
+		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+		dst = kmap_atomic(page);
+		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+				GTT_PAGE_SIZE);
+		kunmap_atomic(dst);
+		i++;
+	}
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap_atomic(page);
+
+#define COPY_REG(name) \
+	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+	COPY_REG(ctx_ctrl);
+	COPY_REG(ctx_timestamp);
+
+	if (ring_id == RCS) {
+		COPY_REG(bb_per_ctx_ptr);
+		COPY_REG(rcs_indirect_ctx);
+		COPY_REG(rcs_indirect_ctx_offset);
+	}
+#undef COPY_REG
+
+	set_context_pdp_root_pointer(shadow_ring_context,
+				     workload->shadow_mm->shadow_page_table);
+
+	intel_gvt_hypervisor_read_gpa(vgpu,
+			workload->ring_context_gpa +
+			sizeof(*shadow_ring_context),
+			(void *)shadow_ring_context +
+			sizeof(*shadow_ring_context),
+			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+	kunmap_atomic(shadow_ring_context);
+	return 0;
+}
+
+static int shadow_context_status_change(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct intel_vgpu *vgpu = container_of(nb,
+			struct intel_vgpu, shadow_ctx_notifier_block);
+	struct drm_i915_gem_request *req =
+		(struct drm_i915_gem_request *)data;
+	struct intel_gvt_workload_scheduler *scheduler =
+		&vgpu->gvt->scheduler;
+	struct intel_vgpu_workload *workload =
+		scheduler->current_workload[req->engine->id];
+
+	switch (action) {
+	case INTEL_CONTEXT_SCHEDULE_IN:
+		intel_gvt_load_render_mmio(workload->vgpu,
+					   workload->ring_id);
+		atomic_set(&workload->shadow_ctx_active, 1);
+		break;
+	case INTEL_CONTEXT_SCHEDULE_OUT:
+		intel_gvt_restore_render_mmio(workload->vgpu,
+					      workload->ring_id);
+		atomic_set(&workload->shadow_ctx_active, 0);
+		break;
+	default:
+		WARN_ON(1);
+		return NOTIFY_OK;
+	}
+	wake_up(&workload->shadow_ctx_status_wq);
+	return NOTIFY_OK;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct drm_i915_gem_request *rq;
+	int ret;
+
+	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+		ring_id, workload);
+
+	shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+	if (IS_ERR(rq)) {
+		gvt_err("fail to allocate gem request\n");
+		workload->status = PTR_ERR(rq);
+		return workload->status;
+	}
+
+	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+	workload->req = i915_gem_request_get(rq);
+
+	mutex_lock(&gvt->lock);
+
+	ret = intel_gvt_scan_and_shadow_workload(workload);
+	if (ret)
+		goto err;
+
+	ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+	if (ret)
+		goto err;
+
+	ret = populate_shadow_context(workload);
+	if (ret)
+		goto err;
+
+	if (workload->prepare) {
+		ret = workload->prepare(workload);
+		if (ret)
+			goto err;
+	}
+
+	mutex_unlock(&gvt->lock);
+
+	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+			ring_id, workload->req);
+
+	i915_add_request_no_flush(rq);
+	workload->dispatched = true;
+	return 0;
+err:
+	workload->status = ret;
+
+	mutex_unlock(&gvt->lock);
+
+	i915_add_request_no_flush(rq);
+	return ret;
+}
+
+static struct intel_vgpu_workload *pick_next_workload(
+		struct intel_gvt *gvt, int ring_id)
+{
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct intel_vgpu_workload *workload = NULL;
+
+	mutex_lock(&gvt->lock);
+
+	/*
+	 * no current vgpu / will be scheduled out / no workload
+	 * bail out
+	 */
+	if (!scheduler->current_vgpu) {
+		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+		goto out;
+	}
+
+	if (scheduler->need_reschedule) {
+		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+		goto out;
+	}
+
+	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
+		gvt_dbg_sched("ring id %d stop - no available workload\n",
+				ring_id);
+		goto out;
+	}
+
+	/*
+	 * still have a current workload; maybe the workload dispatcher
+	 * failed to submit it for some reason, so resubmit it.
+	 */
+	if (scheduler->current_workload[ring_id]) {
+		workload = scheduler->current_workload[ring_id];
+		gvt_dbg_sched("ring id %d still have current workload %p\n",
+				ring_id, workload);
+		goto out;
+	}
+
+	/*
+	 * pick a workload as the current workload.
+	 * once the current workload is set, schedule policy routines
+	 * will wait until the current workload is finished when trying
+	 * to schedule out a vgpu.
+	 */
+	scheduler->current_workload[ring_id] = container_of(
+			workload_q_head(scheduler->current_vgpu, ring_id)->next,
+			struct intel_vgpu_workload, list);
+
+	workload = scheduler->current_workload[ring_id];
+
+	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+
+	atomic_inc(&workload->vgpu->running_workload_num);
+out:
+	mutex_unlock(&gvt->lock);
+	return workload;
+}
+
+static void update_guest_context(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+	void *src;
+	unsigned long context_gpa, context_page_num;
+	int i;
+
+	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
+			workload->ctx_desc.lrca);
+
+	context_page_num = intel_lr_context_size(
+			gvt->dev_priv->engine[ring_id]);
+
+	context_page_num = context_page_num >> PAGE_SHIFT;
+
+	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+		context_page_num = 19;
+
+	i = 2;
+
+	while (i < context_page_num) {
+		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+				(u32)((workload->ctx_desc.lrca + i) <<
+					GTT_PAGE_SHIFT));
+		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+			gvt_err("invalid guest context descriptor\n");
+			return;
+		}
+
+		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+		src = kmap_atomic(page);
+		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
+				GTT_PAGE_SIZE);
+		kunmap_atomic(src);
+		i++;
+	}
+
+	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
+		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap_atomic(page);
+
+#define COPY_REG(name) \
+	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+	COPY_REG(ctx_ctrl);
+	COPY_REG(ctx_timestamp);
+
+#undef COPY_REG
+
+	intel_gvt_hypervisor_write_gpa(vgpu,
+			workload->ring_context_gpa +
+			sizeof(*shadow_ring_context),
+			(void *)shadow_ring_context +
+			sizeof(*shadow_ring_context),
+			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+	kunmap_atomic(shadow_ring_context);
+}
+
+static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
+{
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct intel_vgpu_workload *workload;
+	int event;
+
+	mutex_lock(&gvt->lock);
+
+	workload = scheduler->current_workload[ring_id];
+
+	if (!workload->status && !workload->vgpu->resetting) {
+		wait_event(workload->shadow_ctx_status_wq,
+			   !atomic_read(&workload->shadow_ctx_active));
+
+		update_guest_context(workload);
+
+		for_each_set_bit(event, workload->pending_events,
+				 INTEL_GVT_EVENT_MAX)
+			intel_vgpu_trigger_virtual_event(workload->vgpu,
+					event);
+	}
+
+	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
+			ring_id, workload, workload->status);
+
+	scheduler->current_workload[ring_id] = NULL;
+
+	atomic_dec(&workload->vgpu->running_workload_num);
+
+	list_del_init(&workload->list);
+	workload->complete(workload);
+
+	wake_up(&scheduler->workload_complete_wq);
+	mutex_unlock(&gvt->lock);
+}
+
+struct workload_thread_param {
+	struct intel_gvt *gvt;
+	int ring_id;
+};
+
+static DEFINE_MUTEX(scheduler_mutex);
+
+static int workload_thread(void *priv)
+{
+	struct workload_thread_param *p = (struct workload_thread_param *)priv;
+	struct intel_gvt *gvt = p->gvt;
+	int ring_id = p->ring_id;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct intel_vgpu_workload *workload = NULL;
+	long lret;
+	int ret;
+	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+	kfree(p);
+
+	gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+
+	while (!kthread_should_stop()) {
+		add_wait_queue(&scheduler->waitq[ring_id], &wait);
+		do {
+			workload = pick_next_workload(gvt, ring_id);
+			if (workload)
+				break;
+			wait_woken(&wait, TASK_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT);
+		} while (!kthread_should_stop());
+		remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+		if (!workload)
+			break;
+
+		mutex_lock(&scheduler_mutex);
+
+		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
+				workload->ring_id, workload,
+				workload->vgpu->id);
+
+		intel_runtime_pm_get(gvt->dev_priv);
+
+		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
+				workload->ring_id, workload);
+
+		if (need_force_wake)
+			intel_uncore_forcewake_get(gvt->dev_priv,
+					FORCEWAKE_ALL);
+
+		mutex_lock(&gvt->dev_priv->drm.struct_mutex);
+		ret = dispatch_workload(workload);
+		mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
+
+		if (ret) {
+			gvt_err("fail to dispatch workload, skip\n");
+			goto complete;
+		}
+
+		gvt_dbg_sched("ring id %d wait workload %p\n",
+				workload->ring_id, workload);
+
+		lret = i915_wait_request(workload->req,
+					 0, MAX_SCHEDULE_TIMEOUT);
+		if (lret < 0) {
+			workload->status = lret;
+			gvt_err("fail to wait workload, skip\n");
+		} else {
+			workload->status = 0;
+		}
+
+complete:
+		gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+				workload, workload->status);
+
+		mutex_lock(&gvt->dev_priv->drm.struct_mutex);
+		complete_current_workload(gvt, ring_id);
+		mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
+
+		i915_gem_request_put(fetch_and_zero(&workload->req));
+
+		if (need_force_wake)
+			intel_uncore_forcewake_put(gvt->dev_priv,
+					FORCEWAKE_ALL);
+
+		intel_runtime_pm_put(gvt->dev_priv);
+
+		mutex_unlock(&scheduler_mutex);
+
+	}
+	return 0;
+}
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+	if (atomic_read(&vgpu->running_workload_num)) {
+		gvt_dbg_sched("wait vgpu idle\n");
+
+		wait_event(scheduler->workload_complete_wq,
+				!atomic_read(&vgpu->running_workload_num));
+	}
+}
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
+{
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	int i;
+
+	gvt_dbg_core("clean workload scheduler\n");
+
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		if (scheduler->thread[i]) {
+			kthread_stop(scheduler->thread[i]);
+			scheduler->thread[i] = NULL;
+		}
+	}
+}
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
+{
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct workload_thread_param *param = NULL;
+	int ret;
+	int i;
+
+	gvt_dbg_core("init workload scheduler\n");
+
+	init_waitqueue_head(&scheduler->workload_complete_wq);
+
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		/* check ring mask at init time */
+		if (!HAS_ENGINE(gvt->dev_priv, i))
+			continue;
+
+		init_waitqueue_head(&scheduler->waitq[i]);
+
+		param = kzalloc(sizeof(*param), GFP_KERNEL);
+		if (!param) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		param->gvt = gvt;
+		param->ring_id = i;
+
+		scheduler->thread[i] = kthread_run(workload_thread, param,
+			"gvt workload %d", i);
+		if (IS_ERR(scheduler->thread[i])) {
+			gvt_err("fail to create workload thread\n");
+			ret = PTR_ERR(scheduler->thread[i]);
+			goto err;
+		}
+	}
+	return 0;
+err:
+	intel_gvt_clean_workload_scheduler(gvt);
+	kfree(param);
+	param = NULL;
+	return ret;
+}
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
+			&vgpu->shadow_ctx_notifier_block);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+
+	/* a little hacky to mark as ctx closed */
+	vgpu->shadow_ctx->closed = true;
+	i915_gem_context_put(vgpu->shadow_ctx);
+
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+{
+	atomic_set(&vgpu->running_workload_num, 0);
+
+	vgpu->shadow_ctx = i915_gem_context_create_gvt(
+			&vgpu->gvt->dev_priv->drm);
+	if (IS_ERR(vgpu->shadow_ctx))
+		return PTR_ERR(vgpu->shadow_ctx);
+
+	vgpu->shadow_ctx->engine[RCS].initialised = true;
+
+	vgpu->shadow_ctx_notifier_block.notifier_call =
+		shadow_context_status_change;
+
+	atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
+				       &vgpu->shadow_ctx_notifier_block);
+	return 0;
+}
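
Schedule-in/schedule-out events for the shadow context reach GVT through an
atomic notifier chain: intel_vgpu_init_gvt_context() registers
shadow_context_status_change(), which switches the render MMIO state at each
transition. A self-contained toy module showing the same atomic-notifier
pattern (all demo_* names are illustrative, not GVT API):

	#include <linux/module.h>
	#include <linux/notifier.h>

	static ATOMIC_NOTIFIER_HEAD(demo_status_notifier);

	/* stand-ins for INTEL_CONTEXT_SCHEDULE_IN/_OUT */
	enum { DEMO_SCHEDULE_IN, DEMO_SCHEDULE_OUT };

	static int demo_status_change(struct notifier_block *nb,
				      unsigned long action, void *data)
	{
		switch (action) {
		case DEMO_SCHEDULE_IN:
			pr_info("demo: load per-vGPU render state\n");
			break;
		case DEMO_SCHEDULE_OUT:
			pr_info("demo: restore host render state\n");
			break;
		default:
			WARN_ON(1);
		}
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = {
		.notifier_call = demo_status_change,
	};

	static int __init demo_init(void)
	{
		atomic_notifier_chain_register(&demo_status_notifier, &demo_nb);
		atomic_notifier_call_chain(&demo_status_notifier,
					   DEMO_SCHEDULE_IN, NULL);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		atomic_notifier_chain_unregister(&demo_status_notifier,
						 &demo_nb);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
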
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
new file mode 100644
index 0000000..3b30c28
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *    Changbin Du <changbin.du@intel.com>
+ *    Min He <min.he@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *    Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#ifndef _GVT_SCHEDULER_H_
+#define _GVT_SCHEDULER_H_
+
+struct intel_gvt_workload_scheduler {
+	struct intel_vgpu *current_vgpu;
+	struct intel_vgpu *next_vgpu;
+	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
+	bool need_reschedule;
+
+	wait_queue_head_t workload_complete_wq;
+	struct task_struct *thread[I915_NUM_ENGINES];
+	wait_queue_head_t waitq[I915_NUM_ENGINES];
+
+	void *sched_data;
+	struct intel_gvt_sched_policy_ops *sched_ops;
+};
+
+#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
+#define INDIRECT_CTX_SIZE_MASK 0x3f
+struct shadow_indirect_ctx {
+	struct drm_i915_gem_object *obj;
+	unsigned long guest_gma;
+	unsigned long shadow_gma;
+	void *shadow_va;
+	uint32_t size;
+};
+
+#define PER_CTX_ADDR_MASK 0xfffff000
+struct shadow_per_ctx {
+	unsigned long guest_gma;
+	unsigned long shadow_gma;
+};
+
+struct intel_shadow_wa_ctx {
+	struct intel_vgpu_workload *workload;
+	struct shadow_indirect_ctx indirect_ctx;
+	struct shadow_per_ctx per_ctx;
+};
+
+struct intel_vgpu_workload {
+	struct intel_vgpu *vgpu;
+	int ring_id;
+	struct drm_i915_gem_request *req;
+	/* whether this workload has been dispatched to i915 */
+	bool dispatched;
+	int status;
+
+	struct intel_vgpu_mm *shadow_mm;
+
+	/* different submission models may need different handlers */
+	int (*prepare)(struct intel_vgpu_workload *);
+	int (*complete)(struct intel_vgpu_workload *);
+	struct list_head list;
+
+	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+	void *shadow_ring_buffer_va;
+
+	/* execlist context information */
+	struct execlist_ctx_descriptor_format ctx_desc;
+	struct execlist_ring_context *ring_context;
+	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+	bool restore_inhibit;
+	struct intel_vgpu_elsp_dwords elsp_dwords;
+	bool emulate_schedule_in;
+	atomic_t shadow_ctx_active;
+	wait_queue_head_t shadow_ctx_status_wq;
+	u64 ring_context_gpa;
+
+	/* shadow batch buffer */
+	struct list_head shadow_bb;
+	struct intel_shadow_wa_ctx wa_ctx;
+};
+
+/* An Intel shadow batch buffer is an i915 gem object */
+struct intel_shadow_bb_entry {
+	struct list_head list;
+	struct drm_i915_gem_object *obj;
+	void *va;
+	unsigned long len;
+	void *bb_start_cmd_va;
+};
+
+#define workload_q_head(vgpu, ring_id) \
+	(&(vgpu->workload_q_head[ring_id]))
+
+#define queue_workload(workload) do { \
+	list_add_tail(&workload->list, \
+	workload_q_head(workload->vgpu, workload->ring_id)); \
+	wake_up(&workload->vgpu->gvt-> \
+	scheduler.waitq[workload->ring_id]); \
+} while (0)
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
new file mode 100644
index 0000000..53a2d10
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright © 2011-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _GVT_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+#include <asm/tsc.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gvt
+
+TRACE_EVENT(spt_alloc,
+	TP_PROTO(int id, void *spt, int type, unsigned long mfn,
+		unsigned long gpt_gfn),
+
+	TP_ARGS(id, spt, type, mfn, gpt_gfn),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(void *, spt)
+		__field(int, type)
+		__field(unsigned long, mfn)
+		__field(unsigned long, gpt_gfn)
+		),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->spt = spt;
+		__entry->type = type;
+		__entry->mfn = mfn;
+		__entry->gpt_gfn = gpt_gfn;
+	),
+
+	TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
+		__entry->id,
+		__entry->spt,
+		__entry->type,
+		__entry->mfn,
+		__entry->gpt_gfn)
+);
+
+TRACE_EVENT(spt_free,
+	TP_PROTO(int id, void *spt, int type),
+
+	TP_ARGS(id, spt, type),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(void *, spt)
+		__field(int, type)
+		),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->spt = spt;
+		__entry->type = type;
+	),
+
+	TP_printk("VM%u [free] spt %p type %d\n",
+		__entry->id,
+		__entry->spt,
+		__entry->type)
+);
+
+#define MAX_BUF_LEN 256
+
+TRACE_EVENT(gma_index,
+	TP_PROTO(const char *prefix, unsigned long gma,
+		unsigned long index),
+
+	TP_ARGS(prefix, gma, index),
+
+	TP_STRUCT__entry(
+		__array(char, buf, MAX_BUF_LEN)
+	),
+
+	TP_fast_assign(
+		snprintf(__entry->buf, MAX_BUF_LEN,
+			"%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
+	),
+
+	TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gma_translate,
+	TP_PROTO(int id, char *type, int ring_id, int pt_level,
+		unsigned long gma, unsigned long gpa),
+
+	TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+
+	TP_STRUCT__entry(
+		__array(char, buf, MAX_BUF_LEN)
+	),
+
+	TP_fast_assign(
+		snprintf(__entry->buf, MAX_BUF_LEN,
+			"VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
+				id, type, ring_id, pt_level, gma, gpa);
+	),
+
+	TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_refcount,
+	TP_PROTO(int id, char *action, void *spt, int before, int after),
+
+	TP_ARGS(id, action, spt, before, after),
+
+	TP_STRUCT__entry(
+		__array(char, buf, MAX_BUF_LEN)
+	),
+
+	TP_fast_assign(
+		snprintf(__entry->buf, MAX_BUF_LEN,
+			"VM%d [%s] spt %p before %d -> after %d\n",
+				id, action, spt, before, after);
+	),
+
+	TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_change,
+	TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
+		int type),
+
+	TP_ARGS(id, action, spt, gfn, type),
+
+	TP_STRUCT__entry(
+		__array(char, buf, MAX_BUF_LEN)
+	),
+
+	TP_fast_assign(
+		snprintf(__entry->buf, MAX_BUF_LEN,
+			"VM%d [%s] spt %p gfn 0x%lx type %d\n",
+				id, action, spt, gfn, type);
+	),
+
+	TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gpt_change,
+	TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
+		unsigned long index),
+
+	TP_ARGS(id, tag, spt, type, v, index),
+
+	TP_STRUCT__entry(
+		__array(char, buf, MAX_BUF_LEN)
+	),
+
+	TP_fast_assign(
+		snprintf(__entry->buf, MAX_BUF_LEN,
+		"VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
+			id, tag, spt, type, v, index);
+	),
+
+	TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_change,
+	TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),
+
+	TP_ARGS(id, tag, page_id, gpt, type),
+
+	TP_STRUCT__entry(
+		__array(char, buf, MAX_BUF_LEN)
+	),
+
+	TP_fast_assign(
+		snprintf(__entry->buf, MAX_BUF_LEN,
+		"VM%d [oos %s] page id %d gpt %p type %d\n",
+			id, tag, page_id, gpt, type);
+	),
+
+	TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_sync,
+	TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
+		unsigned long index),
+
+	TP_ARGS(id, page_id, gpt, type, v, index),
+
+	TP_STRUCT__entry(
+		__array(char, buf, MAX_BUF_LEN)
+	),
+
+	TP_fast_assign(
+	snprintf(__entry->buf, MAX_BUF_LEN,
+	"VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
+				id, page_id, gpt, type, v, index);
+	),
+
+	TP_printk("%s", __entry->buf)
+);
+
+#define MAX_CMD_STR_LEN	256
+TRACE_EVENT(gvt_command,
+		TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
+			 u32 cmd_len, bool ring_buffer_cmd,
+			 cycles_t cost_pre_cmd_handler,
+			 cycles_t cost_cmd_handler),
+
+		TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len,
+			ring_buffer_cmd, cost_pre_cmd_handler,
+			cost_cmd_handler),
+
+		TP_STRUCT__entry(
+			__field(u8, vm_id)
+			__field(u8, ring_id)
+			__field(int, i)
+			__array(char, tmp_buf, MAX_CMD_STR_LEN)
+			__array(char, cmd_str, MAX_CMD_STR_LEN)
+			),
+
+		TP_fast_assign(
+			__entry->vm_id = vm_id;
+			__entry->ring_id = ring_id;
+			__entry->cmd_str[0] = '\0';
+			snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN,
+				"VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ",
+				vm_id, ring_id, ring_buffer_cmd ? "RB" : "BB",
+				ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
+			strcat(__entry->cmd_str, __entry->tmp_buf);
+			__entry->i = 0;
+			while (cmd_len > 0) {
+				if (cmd_len >= 8) {
+					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
+						cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
+						cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
+					__entry->i += 8;
+					cmd_len -= 8;
+					strcat(__entry->cmd_str, __entry->tmp_buf);
+				} else if (cmd_len >= 4) {
+					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
+						cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
+					__entry->i += 4;
+					cmd_len -= 4;
+					strcat(__entry->cmd_str, __entry->tmp_buf);
+				} else if (cmd_len >= 2) {
+					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN,
+						"%08x %08x ", cmd_va[__entry->i],
+						cmd_va[__entry->i+1]);
+					__entry->i += 2;
+					cmd_len -= 2;
+					strcat(__entry->cmd_str, __entry->tmp_buf);
+				} else if (cmd_len == 1) {
+					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
+					__entry->i += 1;
+					cmd_len -= 1;
+					strcat(__entry->cmd_str, __entry->tmp_buf);
+				}
+			}
+			strcat(__entry->cmd_str, "\n");
+		),
+
+		TP_printk("%s", __entry->cmd_str)
+);
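/*
 * Illustrative sketch, not part of the patch: the strcat() loop in
 * gvt_command above relies on the formatted dump always fitting in the
 * MAX_CMD_STR_LEN buffer. A bounds-aware alternative would track the
 * write position with scnprintf(), which never writes past the end of
 * the buffer and returns the number of characters actually stored:
 */
static inline int example_append_dwords(char *buf, int pos, int size,
					const u32 *va, int n)
{
	int i;

	for (i = 0; i < n && pos < size; i++)
		pos += scnprintf(buf + pos, size - pos, "%08x ", va[i]);
	return pos;
}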
+#endif /* _GVT_TRACE_H_ */
+
+/* This part must be out of protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/gvt/trace_points.c
similarity index 64%
rename from drivers/gpu/drm/i915/i915_gem_dmabuf.h
rename to drivers/gpu/drm/i915/gvt/trace_points.c
index 9131555..a3deed69 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.h
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Intel Corporation
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -16,30 +16,21 @@
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ *    Zhi Wang <zhi.a.wang@intel.com>
  *
  */
 
-#ifndef _I915_GEM_DMABUF_H_
-#define _I915_GEM_DMABUF_H_
+#include "trace.h"
 
-#include <linux/dma-buf.h>
-
-static inline struct reservation_object *
-i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
-{
-	struct dma_buf *dma_buf;
-
-	if (obj->base.dma_buf)
-		dma_buf = obj->base.dma_buf;
-	else if (obj->base.import_attach)
-		dma_buf = obj->base.import_attach->dmabuf;
-	else
-		return NULL;
-
-	return dma_buf->resv;
-}
-
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
 #endif
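/*
 * Aside, for readers unfamiliar with the idiom: the double include of
 * "trace.h" above is the standard ftrace pattern. The first include
 * merely declares the tracepoints; once CREATE_TRACE_POINTS is defined,
 * <trace/define_trace.h> (pulled in at the bottom of trace.h via the
 * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE settings there) re-reads the
 * header and this time emits the tracepoint definitions, which must
 * exist in exactly one translation unit. The __CHECKER__ guard keeps
 * sparse from chewing through the generated code.
 */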
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
new file mode 100644
index 0000000..4f54005
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eddie Dong <eddie.dong@intel.com>
+ *    Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Zhi Wang <zhi.a.wang@intel.com>
+ *    Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+	vfree(vgpu->mmio.vreg);
+	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
+
+int setup_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	const struct intel_gvt_device_info *info = &gvt->device_info;
+
+	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+	if (!vgpu->mmio.vreg)
+		return -ENOMEM;
+
+	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+	/* set bits 0:2 (Core C-State) to C0 */
+	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+	return 0;
+}
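/*
 * Illustrative sketch, not part of the patch: the single vzalloc()
 * above carves two MMIO images out of one allocation -- vreg, the
 * register file the guest sees, followed by sreg, the shadow copy used
 * when state is propagated to real hardware. The vgpu_vreg() accessor
 * used above is defined in gvt.h roughly along these lines:
 *
 *	#define vgpu_vreg(vgpu, reg_offset) \
 *		(*(u32 *)((vgpu)->mmio.vreg + (reg_offset)))
 */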
+
+static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
+	struct intel_vgpu_creation_params *param)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	const struct intel_gvt_device_info *info = &gvt->device_info;
+	u16 *gmch_ctl;
+	int i;
+
+	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+	       info->cfg_space_size);
+
+	if (!param->primary) {
+		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+			INTEL_GVT_PCI_CLASS_VGA_OTHER;
+		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+			INTEL_GVT_PCI_CLASS_VGA_OTHER;
+	}
+
+	/* Show the guest that there isn't any stolen memory. */
+	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+				 gvt_aperture_pa_base(gvt), true);
+
+	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+					     | PCI_COMMAND_MEMORY
+					     | PCI_COMMAND_MASTER);
+	/*
+	 * Clear the upper 32 bits of the BARs and let the guest assign the new values
+	 */
+	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+
+	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+		vgpu->cfg_space.bar[i].size = pci_resource_len(
+					      gvt->dev_priv->drm.pdev, i * 2);
+		vgpu->cfg_space.bar[i].tracked = false;
+	}
+}
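/*
 * Aside, not part of the patch: the i * 2 stride in the loop above
 * reflects that these BARs are 64 bit, so each consumes two 32-bit
 * config-space slots -- BAR0 spans PCI_BASE_ADDRESS_0/1, BAR2 spans
 * PCI_BASE_ADDRESS_2/3, and so on -- which also explains why the code
 * clears PCI_BASE_ADDRESS_1 and _3 (the upper halves) just before.
 */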
+
+void populate_pvinfo_page(struct intel_vgpu *vgpu)
+{
+	/* setup the ballooning information */
+	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
+	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
+	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
+	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+		vgpu_aperture_gmadr_base(vgpu);
+	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+		vgpu_aperture_sz(vgpu);
+	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+		vgpu_hidden_gmadr_base(vgpu);
+	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+		vgpu_hidden_sz(vgpu);
+
+	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+
+	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
+	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
+		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
+	gvt_dbg_core("hidden base [GMADR] 0x%llx size 0x%llx\n",
+		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
+	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
+
+	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+}
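/*
 * Illustrative sketch, not part of the patch: the PVINFO page filled in
 * above is read back by the paravirtualized guest i915 driver, which
 * shares the struct vgt_if layout via i915_pvinfo.h. A guest-side probe
 * could look roughly like this (helper name and mapping hypothetical):
 */
static bool example_guest_detect_vgt(void __iomem *pvinfo)
{
	u64 magic = readq(pvinfo + offsetof(struct vgt_if, magic));

	return magic == VGT_MAGIC;
}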
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when a user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+
+	mutex_lock(&gvt->lock);
+
+	vgpu->active = false;
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
+
+	if (atomic_read(&vgpu->running_workload_num)) {
+		mutex_unlock(&gvt->lock);
+		intel_gvt_wait_vgpu_idle(vgpu);
+		mutex_lock(&gvt->lock);
+	}
+
+	intel_vgpu_stop_schedule(vgpu);
+	intel_vgpu_clean_sched_policy(vgpu);
+	intel_vgpu_clean_gvt_context(vgpu);
+	intel_vgpu_clean_execlist(vgpu);
+	intel_vgpu_clean_display(vgpu);
+	intel_vgpu_clean_opregion(vgpu);
+	intel_vgpu_clean_gtt(vgpu);
+	intel_gvt_hypervisor_detach_vgpu(vgpu);
+	intel_vgpu_free_resource(vgpu);
+	clean_vgpu_mmio(vgpu);
+	vfree(vgpu);
+
+	mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_create_vgpu - create a virtual GPU
+ * @gvt: GVT device
+ * @param: vGPU creation parameters
+ *
+ * This function is called when a user wants to create a virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu on success, error pointer on failure.
+ */
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+		struct intel_vgpu_creation_params *param)
+{
+	struct intel_vgpu *vgpu;
+	int ret;
+
+	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
+			param->handle, param->low_gm_sz, param->high_gm_sz,
+			param->fence_sz);
+
+	vgpu = vzalloc(sizeof(*vgpu));
+	if (!vgpu)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&gvt->lock);
+
+	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+	if (ret < 0)
+		goto out_free_vgpu;
+
+	vgpu->id = ret;
+	vgpu->handle = param->handle;
+	vgpu->gvt = gvt;
+	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
+
+	setup_vgpu_cfg_space(vgpu, param);
+
+	ret = setup_vgpu_mmio(vgpu);
+	if (ret)
+		goto out_free_vgpu;
+
+	ret = intel_vgpu_alloc_resource(vgpu, param);
+	if (ret)
+		goto out_clean_vgpu_mmio;
+
+	populate_pvinfo_page(vgpu);
+
+	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
+	if (ret)
+		goto out_clean_vgpu_resource;
+
+	ret = intel_vgpu_init_gtt(vgpu);
+	if (ret)
+		goto out_detach_hypervisor_vgpu;
+
+	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+		ret = intel_vgpu_init_opregion(vgpu, 0);
+		if (ret)
+			goto out_clean_gtt;
+	}
+
+	ret = intel_vgpu_init_display(vgpu);
+	if (ret)
+		goto out_clean_opregion;
+
+	ret = intel_vgpu_init_execlist(vgpu);
+	if (ret)
+		goto out_clean_display;
+
+	ret = intel_vgpu_init_gvt_context(vgpu);
+	if (ret)
+		goto out_clean_execlist;
+
+	ret = intel_vgpu_init_sched_policy(vgpu);
+	if (ret)
+		goto out_clean_shadow_ctx;
+
+	vgpu->active = true;
+	mutex_unlock(&gvt->lock);
+
+	return vgpu;
+
+out_clean_shadow_ctx:
+	intel_vgpu_clean_gvt_context(vgpu);
+out_clean_execlist:
+	intel_vgpu_clean_execlist(vgpu);
+out_clean_display:
+	intel_vgpu_clean_display(vgpu);
+out_clean_opregion:
+	intel_vgpu_clean_opregion(vgpu);
+out_clean_gtt:
+	intel_vgpu_clean_gtt(vgpu);
+out_detach_hypervisor_vgpu:
+	intel_gvt_hypervisor_detach_vgpu(vgpu);
+out_clean_vgpu_resource:
+	intel_vgpu_free_resource(vgpu);
+out_clean_vgpu_mmio:
+	clean_vgpu_mmio(vgpu);
+out_free_vgpu:
+	vfree(vgpu);
+	mutex_unlock(&gvt->lock);
+	return ERR_PTR(ret);
+}
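/*
 * Illustrative sketch, not part of the patch: a hypervisor backend
 * would drive the two entry points above roughly as below; the
 * parameter values are invented for illustration.
 */
static struct intel_vgpu *example_spawn_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu_creation_params param = {
		.handle = 1,
		.primary = 1,
		.low_gm_sz = 64,	/* MB of mappable graphics memory */
		.high_gm_sz = 448,	/* MB of hidden graphics memory */
		.fence_sz = 4,		/* fence registers */
	};

	/* on failure an ERR_PTR() is returned, never NULL */
	return intel_gvt_create_vgpu(gvt, &param);
}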
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 70980f8..f5039f4 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 	}
 
 	if (ret == 0 && needs_clflush_after)
-		drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+		drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
 	i915_gem_object_unpin_map(shadow_batch_obj);
 
 	return ret;
@@ -1308,10 +1308,11 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	bool active = false;
 
 	/* If the command parser is not enabled, report 0 - unsupported */
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		if (intel_engine_needs_cmd_parser(engine)) {
 			active = true;
 			break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 27b0e34..b681d42 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -79,10 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
-	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 #undef PRINT_FLAG
-#undef SEP_SEMICOLON
 
 	return 0;
 }
@@ -109,12 +107,12 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_object_to_ggtt(obj, NULL) ?  'g' : ' ';
+	return !list_empty(&obj->userfault_link) ? 'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->mapping ? 'M' : ' ';
+	return obj->mm.mapping ? 'M' : ' ';
 }
 
 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -138,11 +136,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 	unsigned int frontbuffer_bits;
 	int pin_count = 0;
-	enum intel_engine_id id;
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
+	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
 		   &obj->base,
 		   get_active_flag(obj),
 		   get_pin_flag(obj),
@@ -151,17 +148,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   get_pin_mapped_flag(obj),
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
-		   obj->base.write_domain);
-	for_each_engine_id(engine, dev_priv, id)
-		seq_printf(m, "%x ",
-			   i915_gem_active_get_seqno(&obj->last_read[id],
-						     &obj->base.dev->struct_mutex));
-	seq_printf(m, "] %x %s%s%s",
-		   i915_gem_active_get_seqno(&obj->last_write,
-					     &obj->base.dev->struct_mutex),
+		   obj->base.write_domain,
 		   i915_cache_level_str(dev_priv, obj->cache_level),
-		   obj->dirty ? " dirty" : "",
-		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+		   obj->mm.dirty ? " dirty" : "",
+		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
@@ -188,18 +178,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	}
 	if (obj->stolen)
 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
-	if (obj->pin_display || obj->fault_mappable) {
-		char s[3], *t = s;
-		if (obj->pin_display)
-			*t++ = 'p';
-		if (obj->fault_mappable)
-			*t++ = 'f';
-		*t = '\0';
-		seq_printf(m, " (%s mappable)", s);
-	}
 
-	engine = i915_gem_active_get_engine(&obj->last_write,
-					    &dev_priv->drm.struct_mutex);
+	engine = i915_gem_object_last_write_engine(obj);
 	if (engine)
 		seq_printf(m, " (%s)", engine->name);
 
@@ -237,7 +217,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		return ret;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
 		if (obj->stolen == NULL)
 			continue;
 
@@ -247,7 +227,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
 		if (obj->stolen == NULL)
 			continue;
 
@@ -334,11 +314,12 @@ static void print_batch_pool_stats(struct seq_file *m,
 	struct drm_i915_gem_object *obj;
 	struct file_stats stats;
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int j;
 
 	memset(&stats, 0, sizeof(stats));
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			list_for_each_entry(obj,
 					    &engine->batch_pool.cache_list[j],
@@ -402,23 +383,23 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	seq_printf(m, "%u objects, %zu bytes\n",
+	seq_printf(m, "%u objects, %llu bytes\n",
 		   dev_priv->mm.object_count,
 		   dev_priv->mm.object_memory);
 
 	size = count = 0;
 	mapped_size = mapped_count = 0;
 	purgeable_size = purgeable_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
 		size += obj->base.size;
 		++count;
 
-		if (obj->madv == I915_MADV_DONTNEED) {
+		if (obj->mm.madv == I915_MADV_DONTNEED) {
 			purgeable_size += obj->base.size;
 			++purgeable_count;
 		}
 
-		if (obj->mapping) {
+		if (obj->mm.mapping) {
 			mapped_count++;
 			mapped_size += obj->base.size;
 		}
@@ -426,7 +407,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
 	size = count = dpy_size = dpy_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
 		size += obj->base.size;
 		++count;
 
@@ -435,12 +416,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 			++dpy_count;
 		}
 
-		if (obj->madv == I915_MADV_DONTNEED) {
+		if (obj->mm.madv == I915_MADV_DONTNEED) {
 			purgeable_size += obj->base.size;
 			++purgeable_count;
 		}
 
-		if (obj->mapping) {
+		if (obj->mm.mapping) {
 			mapped_count++;
 			mapped_size += obj->base.size;
 		}
@@ -512,7 +493,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 		return ret;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
 		if (show_pin_display_only && !obj->pin_display)
 			continue;
 
@@ -566,12 +547,12 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 					   pipe, plane);
 			}
 			if (work->flip_queued_req) {
-				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
+				struct intel_engine_cs *engine = work->flip_queued_req->engine;
 
 				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
 					   engine->name,
-					   i915_gem_request_get_seqno(work->flip_queued_req),
-					   dev_priv->next_seqno,
+					   work->flip_queued_req->global_seqno,
+					   atomic_read(&dev_priv->gt.global_timeline.next_seqno),
 					   intel_engine_get_seqno(engine),
 					   i915_gem_request_completed(work->flip_queued_req));
 			} else
@@ -607,6 +588,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	struct drm_device *dev = &dev_priv->drm;
 	struct drm_i915_gem_object *obj;
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int total = 0;
 	int ret, j;
 
@@ -614,7 +596,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			int count;
 
@@ -645,12 +627,23 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static void print_request(struct seq_file *m,
+			  struct drm_i915_gem_request *rq,
+			  const char *prefix)
+{
+	seq_printf(m, "%s%x [%x:%x] @ %d: %s\n", prefix,
+		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+		   rq->timeline->common->name);
+}
+
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *req;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int ret, any;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -658,29 +651,18 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		return ret;
 
 	any = 0;
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		int count;
 
 		count = 0;
-		list_for_each_entry(req, &engine->request_list, link)
+		list_for_each_entry(req, &engine->timeline->requests, link)
 			count++;
 		if (count == 0)
 			continue;
 
 		seq_printf(m, "%s requests: %d\n", engine->name, count);
-		list_for_each_entry(req, &engine->request_list, link) {
-			struct pid *pid = req->ctx->pid;
-			struct task_struct *task;
-
-			rcu_read_lock();
-			task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
-			seq_printf(m, "    %x @ %d: %s [%d]\n",
-				   req->fence.seqno,
-				   (int) (jiffies - req->emitted_jiffies),
-				   task ? task->comm : "<unknown>",
-				   task ? task->pid : -1);
-			rcu_read_unlock();
-		}
+		list_for_each_entry(req, &engine->timeline->requests, link)
+			print_request(m, req, "    ");
 
 		any++;
 	}
@@ -701,22 +683,23 @@ static void i915_ring_seqno_info(struct seq_file *m,
 	seq_printf(m, "Current sequence (%s): %x\n",
 		   engine->name, intel_engine_get_seqno(engine));
 
-	spin_lock(&b->lock);
+	spin_lock_irq(&b->lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
 		struct intel_wait *w = container_of(rb, typeof(*w), node);
 
 		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
 			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
 	}
-	spin_unlock(&b->lock);
+	spin_unlock_irq(&b->lock);
 }
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		i915_ring_seqno_info(m, engine);
 
 	return 0;
@@ -727,6 +710,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int i, pipe;
 
 	intel_runtime_pm_get(dev_priv);
@@ -743,17 +727,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(VLV_IIR_RW));
 		seq_printf(m, "Display IMR:\t%08x\n",
 			   I915_READ(VLV_IMR));
-		for_each_pipe(dev_priv, pipe)
+		for_each_pipe(dev_priv, pipe) {
+			enum intel_display_power_domain power_domain;
+
+			power_domain = POWER_DOMAIN_PIPE(pipe);
+			if (!intel_display_power_get_if_enabled(dev_priv,
+								power_domain)) {
+				seq_printf(m, "Pipe %c power disabled\n",
+					   pipe_name(pipe));
+				continue;
+			}
+
 			seq_printf(m, "Pipe %c stat:\t%08x\n",
 				   pipe_name(pipe),
 				   I915_READ(PIPESTAT(pipe)));
 
+			intel_display_power_put(dev_priv, power_domain);
+		}
+
+		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 		seq_printf(m, "Port hotplug:\t%08x\n",
 			   I915_READ(PORT_HOTPLUG_EN));
 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 			   I915_READ(VLV_DPFLIPSTAT));
 		seq_printf(m, "DPINVGTT:\t%08x\n",
 			   I915_READ(DPINVGTT));
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
 		for (i = 0; i < 4; i++) {
 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -895,7 +894,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 			   I915_READ(GTIMR));
 	}
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		if (INTEL_GEN(dev_priv) >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
@@ -943,7 +942,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	const u32 *hws;
 	int i;
 
-	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+	engine = dev_priv->engine[(uintptr_t)node->info_ent->data];
 	hws = engine->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
@@ -956,6 +955,8 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
 static ssize_t
 i915_error_state_write(struct file *filp,
 		       const char __user *ubuf,
@@ -1038,19 +1039,14 @@ static const struct file_operations i915_error_state_fops = {
 	.release = i915_error_state_release,
 };
 
+#endif
+
 static int
 i915_next_seqno_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
-	if (ret)
-		return ret;
-
-	*val = dev_priv->next_seqno;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
+	*val = atomic_read(&dev_priv->gt.global_timeline.next_seqno);
 	return 0;
 }
 
@@ -1065,7 +1061,7 @@ i915_next_seqno_set(void *data, u64 val)
 	if (ret)
 		return ret;
 
-	ret = i915_gem_set_seqno(dev, val);
+	ret = i915_gem_set_global_seqno(dev, val);
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
@@ -1277,15 +1273,42 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	return ret;
 }
 
+static void i915_instdone_info(struct drm_i915_private *dev_priv,
+			       struct seq_file *m,
+			       struct intel_instdone *instdone)
+{
+	int slice;
+	int subslice;
+
+	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
+		   instdone->instdone);
+
+	if (INTEL_GEN(dev_priv) <= 3)
+		return;
+
+	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
+		   instdone->slice_common);
+
+	if (INTEL_GEN(dev_priv) <= 6)
+		return;
+
+	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+			   slice, subslice, instdone->sampler[slice][subslice]);
+
+	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
+			   slice, subslice, instdone->row[slice][subslice]);
+}
+
 static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
 	u64 acthd[I915_NUM_ENGINES];
 	u32 seqno[I915_NUM_ENGINES];
-	u32 instdone[I915_NUM_INSTDONE_REG];
+	struct intel_instdone instdone;
 	enum intel_engine_id id;
-	int j;
 
 	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
 		seq_printf(m, "Wedged\n");
@@ -1303,12 +1326,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_engine_id(engine, dev_priv, id) {
+	for_each_engine(engine, dev_priv, id) {
 		acthd[id] = intel_engine_get_active_head(engine);
 		seqno[id] = intel_engine_get_seqno(engine);
 	}
 
-	i915_get_extra_instdone(dev_priv, instdone);
+	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -1319,16 +1342,27 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	} else
 		seq_printf(m, "Hangcheck inactive\n");
 
-	for_each_engine_id(engine, dev_priv, id) {
+	for_each_engine(engine, dev_priv, id) {
+		struct intel_breadcrumbs *b = &engine->breadcrumbs;
+		struct rb_node *rb;
+
 		seq_printf(m, "%s:\n", engine->name);
 		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
-			   engine->hangcheck.seqno,
-			   seqno[id],
-			   engine->last_submitted_seqno);
+			   engine->hangcheck.seqno, seqno[id],
+			   intel_engine_last_submit(engine));
 		seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
 			   yesno(intel_engine_has_waiter(engine)),
 			   yesno(test_bit(engine->id,
 					  &dev_priv->gpu_error.missed_irq_rings)));
+		spin_lock_irq(&b->lock);
+		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+			struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+			seq_printf(m, "\t%s [%d] waiting for %x\n",
+				   w->tsk->comm, w->tsk->pid, w->seqno);
+		}
+		spin_unlock_irq(&b->lock);
+
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
 			   (long long)engine->hangcheck.acthd,
 			   (long long)acthd[id]);
@@ -1336,18 +1370,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
 
 		if (engine->id == RCS) {
-			seq_puts(m, "\tinstdone read =");
+			seq_puts(m, "\tinstdone read =\n");
 
-			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
-				seq_printf(m, " 0x%08x", instdone[j]);
+			i915_instdone_info(dev_priv, m, &instdone);
 
-			seq_puts(m, "\n\tinstdone accu =");
+			seq_puts(m, "\tinstdone accu =\n");
 
-			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
-				seq_printf(m, " 0x%08x",
-					   engine->hangcheck.instdone[j]);
-
-			seq_puts(m, "\n");
+			i915_instdone_info(dev_priv, m,
+					   &engine->hangcheck.instdone);
 		}
 	}
 
@@ -1357,14 +1387,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
 	u32 rgvmodectl, rstdbyctl;
 	u16 crstandvid;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);
 
 	rgvmodectl = I915_READ(MEMMODECTL);
@@ -1372,7 +1397,6 @@ static int ironlake_drpc_info(struct seq_file *m)
 	crstandvid = I915_READ16(CRSTANDVID);
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
 	seq_printf(m, "Boost freq: %d\n",
@@ -1635,10 +1659,13 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		seq_printf(m, "FBC disabled: %s\n",
 			   dev_priv->fbc.no_fbc_reason);
 
-	if (INTEL_GEN(dev_priv) >= 7)
+	if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
+		uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
+				BDW_FBC_COMPRESSION_MASK :
+				IVB_FBC_COMPRESSION_MASK;
 		seq_printf(m, "Compressing: %s\n",
-			   yesno(I915_READ(FBC_STATUS2) &
-				 FBC_COMPRESSION_MASK));
+			   yesno(I915_READ(FBC_STATUS2) & mask));
+	}
 
 	mutex_unlock(&dev_priv->fbc.lock);
 	intel_runtime_pm_put(dev_priv);
@@ -1717,6 +1744,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	bool sr_enabled = false;
 
 	intel_runtime_pm_get(dev_priv);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
 	if (HAS_PCH_SPLIT(dev_priv))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
@@ -1730,6 +1758,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
+	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 	intel_runtime_pm_put(dev_priv);
 
 	seq_printf(m, "self-refresh: %s\n",
@@ -1909,6 +1938,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
+	enum intel_engine_id id;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1935,7 +1965,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 		seq_putc(m, '\n');
 
-		for_each_engine(engine, dev_priv) {
+		for_each_engine(engine, dev_priv, id) {
 			struct intel_context *ce = &ctx->engine[engine->id];
 
 			seq_printf(m, "%s: ", engine->name);
@@ -1974,7 +2004,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
 			   i915_ggtt_offset(vma));
 
-	if (i915_gem_object_get_pages(vma->obj)) {
+	if (i915_gem_object_pin_pages(vma->obj)) {
 		seq_puts(m, "\tFailed to get pages for context object\n\n");
 		return;
 	}
@@ -1993,6 +2023,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 		kunmap_atomic(reg_state);
 	}
 
+	i915_gem_object_unpin_pages(vma->obj);
 	seq_putc(m, '\n');
 }
 
@@ -2002,6 +2033,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
+	enum intel_engine_id id;
 	int ret;
 
 	if (!i915.enable_execlists) {
@@ -2014,7 +2046,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
-		for_each_engine(engine, dev_priv)
+		for_each_engine(engine, dev_priv, id)
 			i915_dump_lrc_obj(m, ctx, engine);
 
 	mutex_unlock(&dev->struct_mutex);
@@ -2022,84 +2054,6 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_execlists(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_engine_cs *engine;
-	u32 status_pointer;
-	u8 read_pointer;
-	u8 write_pointer;
-	u32 status;
-	u32 ctx_id;
-	struct list_head *cursor;
-	int i, ret;
-
-	if (!i915.enable_execlists) {
-		seq_puts(m, "Logical Ring Contexts are disabled\n");
-		return 0;
-	}
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	intel_runtime_pm_get(dev_priv);
-
-	for_each_engine(engine, dev_priv) {
-		struct drm_i915_gem_request *head_req = NULL;
-		int count = 0;
-
-		seq_printf(m, "%s\n", engine->name);
-
-		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
-		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
-		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
-			   status, ctx_id);
-
-		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
-		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
-
-		read_pointer = GEN8_CSB_READ_PTR(status_pointer);
-		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
-		if (read_pointer > write_pointer)
-			write_pointer += GEN8_CSB_ENTRIES;
-		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
-			   read_pointer, write_pointer);
-
-		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
-			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
-			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
-
-			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
-				   i, status, ctx_id);
-		}
-
-		spin_lock_bh(&engine->execlist_lock);
-		list_for_each(cursor, &engine->execlist_queue)
-			count++;
-		head_req = list_first_entry_or_null(&engine->execlist_queue,
-						    struct drm_i915_gem_request,
-						    execlist_link);
-		spin_unlock_bh(&engine->execlist_lock);
-
-		seq_printf(m, "\t%d requests in queue\n", count);
-		if (head_req) {
-			seq_printf(m, "\tHead request context: %u\n",
-				   head_req->ctx->hw_id);
-			seq_printf(m, "\tHead request tail: %u\n",
-				   head_req->tail);
-		}
-
-		seq_putc(m, '\n');
-	}
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static const char *swizzle_string(unsigned swizzle)
 {
 	switch (swizzle) {
@@ -2127,12 +2081,7 @@ static const char *swizzle_string(unsigned swizzle)
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
@@ -2172,7 +2121,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 		seq_puts(m, "L-shaped memory detected\n");
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -2201,14 +2149,15 @@ static int per_file_ctx(int id, void *ptr, void *data)
 static void gen8_ppgtt_info(struct seq_file *m,
 			    struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int i;
 
 	if (!ppgtt)
 		return;
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		seq_printf(m, "%s\n", engine->name);
 		for (i = 0; i < 4; i++) {
 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -2223,11 +2172,12 @@ static void gen6_ppgtt_info(struct seq_file *m,
 			    struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	if (IS_GEN6(dev_priv))
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		seq_printf(m, "%s\n", engine->name);
 		if (IS_GEN7(dev_priv))
 			seq_printf(m, "GFX_MODE: 0x%08x\n",
@@ -2296,9 +2246,10 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 static int count_irq_waiters(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int count = 0;
 
-	for_each_engine(engine, i915)
+	for_each_engine(engine, i915, id)
 		count += intel_engine_has_waiter(engine);
 
 	return count;
@@ -2325,8 +2276,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	struct drm_file *file;
 
 	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
-	seq_printf(m, "GPU busy? %s [%x]\n",
-		   yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
+	seq_printf(m, "GPU busy? %s [%d requests]\n",
+		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
 	seq_printf(m, "Frequency requested %d\n",
 		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
@@ -2361,7 +2312,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 
 	if (INTEL_GEN(dev_priv) >= 6 &&
 	    dev_priv->rps.enabled &&
-	    dev_priv->gt.active_engines) {
+	    dev_priv->gt.active_requests) {
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
 
@@ -2442,6 +2393,32 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static void i915_guc_log_info(struct seq_file *m,
+			      struct drm_i915_private *dev_priv)
+{
+	struct intel_guc *guc = &dev_priv->guc;
+
+	seq_puts(m, "\nGuC logging stats:\n");
+
+	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
+		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
+		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
+
+	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
+		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
+		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
+
+	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
+		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
+		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
+
+	seq_printf(m, "\tTotal flush interrupt count: %u\n",
+		   guc->log.flush_interrupt_count);
+
+	seq_printf(m, "\tCapture miss count: %u\n",
+		   guc->log.capture_miss_count);
+}
+
 static void i915_guc_client_info(struct seq_file *m,
 				 struct drm_i915_private *dev_priv,
 				 struct i915_guc_client *client)
@@ -2461,7 +2438,7 @@ static void i915_guc_client_info(struct seq_file *m,
 	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
 	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
 
-	for_each_engine_id(engine, dev_priv, id) {
+	for_each_engine(engine, dev_priv, id) {
 		u64 submissions = client->submissions[id];
 		tot += submissions;
 		seq_printf(m, "\tSubmissions: %llu %s\n",
@@ -2504,7 +2481,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
 
 	seq_printf(m, "\nGuC submissions:\n");
-	for_each_engine_id(engine, dev_priv, id) {
+	for_each_engine(engine, dev_priv, id) {
 		u64 submissions = guc.submissions[id];
 		total += submissions;
 		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
@@ -2515,6 +2492,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
 	i915_guc_client_info(m, dev_priv, &client);
 
+	i915_guc_log_info(m, dev_priv);
+
 	/* Add more as required ... */
 
 	return 0;
@@ -2526,10 +2505,10 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 	struct drm_i915_gem_object *obj;
 	int i = 0, pg;
 
-	if (!dev_priv->guc.log_vma)
+	if (!dev_priv->guc.log.vma)
 		return 0;
 
-	obj = dev_priv->guc.log_vma->obj;
+	obj = dev_priv->guc.log.vma->obj;
 	for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
 		u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
 
@@ -2546,6 +2525,44 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_guc_log_control_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	if (!dev_priv->guc.log.vma)
+		return -EINVAL;
+
+	*val = i915.guc_log_level;
+
+	return 0;
+}
+
+static int i915_guc_log_control_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int ret;
+
+	if (!dev_priv->guc.log.vma)
+		return -EINVAL;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	intel_runtime_pm_get(dev_priv);
+	ret = i915_guc_log_control(dev_priv, val);
+	intel_runtime_pm_put(dev_priv);
+
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
+			i915_guc_log_control_get, i915_guc_log_control_set,
+			"%lld\n");
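/*
 * Aside, not part of the patch: DEFINE_SIMPLE_ATTRIBUTE() wraps the
 * get/set pair above into a struct file_operations that parses and
 * prints the value using the "%lld\n" format, so once the node is
 * registered (via the i915_debugfs_files[] table further down) the
 * control is a plain number from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/dri/0/i915_guc_log_control
 *	# echo 2 > /sys/kernel/debug/dri/0/i915_guc_log_control
 */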
+
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2575,11 +2592,22 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
 	else {
 		for_each_pipe(dev_priv, pipe) {
+			enum transcoder cpu_transcoder =
+				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+			enum intel_display_power_domain power_domain;
+
+			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+			if (!intel_display_power_get_if_enabled(dev_priv,
+								power_domain))
+				continue;
+
 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
 				VLV_EDP_PSR_CURR_STATE_MASK;
 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
 				enabled = true;
+
+			intel_display_power_put(dev_priv, power_domain);
 		}
 	}
 
@@ -3121,6 +3149,144 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_engine_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	intel_runtime_pm_get(dev_priv);
+
+	for_each_engine(engine, dev_priv, id) {
+		struct intel_breadcrumbs *b = &engine->breadcrumbs;
+		struct drm_i915_gem_request *rq;
+		struct rb_node *rb;
+		u64 addr;
+
+		seq_printf(m, "%s\n", engine->name);
+		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+			   intel_engine_get_seqno(engine),
+			   intel_engine_last_submit(engine),
+			   engine->hangcheck.seqno,
+			   engine->hangcheck.score);
+
+		rcu_read_lock();
+
+		seq_puts(m, "\tRequests:\n");
+
+		rq = list_first_entry(&engine->timeline->requests,
+				      struct drm_i915_gem_request, link);
+		if (&rq->link != &engine->timeline->requests)
+			print_request(m, rq, "\t\tfirst  ");
+
+		rq = list_last_entry(&engine->timeline->requests,
+				     struct drm_i915_gem_request, link);
+		if (&rq->link != &engine->timeline->requests)
+			print_request(m, rq, "\t\tlast   ");
+
+		rq = i915_gem_find_active_request(engine);
+		if (rq) {
+			print_request(m, rq, "\t\tactive ");
+			seq_printf(m,
+				   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+				   rq->head, rq->postfix, rq->tail,
+				   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+				   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+		}
+
+		seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+			   I915_READ(RING_START(engine->mmio_base)),
+			   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+		seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
+			   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+			   rq ? rq->ring->head : 0);
+		seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
+			   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+			   rq ? rq->ring->tail : 0);
+		seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
+			   I915_READ(RING_CTL(engine->mmio_base)),
+			   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+		rcu_read_unlock();
+
+		addr = intel_engine_get_active_head(engine);
+		seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
+			   upper_32_bits(addr), lower_32_bits(addr));
+		addr = intel_engine_get_last_batch_head(engine);
+		seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+			   upper_32_bits(addr), lower_32_bits(addr));
+
+		if (i915.enable_execlists) {
+			u32 ptr, read, write;
+
+			seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
+				   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+				   I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+			ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+			read = GEN8_CSB_READ_PTR(ptr);
+			write = GEN8_CSB_WRITE_PTR(ptr);
+			seq_printf(m, "\tExeclist CSB read %d, write %d\n",
+				   read, write);
+			if (read >= GEN8_CSB_ENTRIES)
+				read = 0;
+			if (write >= GEN8_CSB_ENTRIES)
+				write = 0;
+			if (read > write)
+				write += GEN8_CSB_ENTRIES;
+			while (read < write) {
+				unsigned int idx = ++read % GEN8_CSB_ENTRIES;
+
+				seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+					   idx,
+					   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+					   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+			}
+
+			rcu_read_lock();
+			rq = READ_ONCE(engine->execlist_port[0].request);
+			if (rq)
+				print_request(m, rq, "\t\tELSP[0] ");
+			else
+				seq_puts(m, "\t\tELSP[0] idle\n");
+			rq = READ_ONCE(engine->execlist_port[1].request);
+			if (rq)
+				print_request(m, rq, "\t\tELSP[1] ");
+			else
+				seq_puts(m, "\t\tELSP[1] idle\n");
+			rcu_read_unlock();
+
+			spin_lock_irq(&engine->execlist_lock);
+			list_for_each_entry(rq, &engine->execlist_queue, execlist_link) {
+				print_request(m, rq, "\t\tQ ");
+			}
+			spin_unlock_irq(&engine->execlist_lock);
+		} else if (INTEL_GEN(dev_priv) > 6) {
+			seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+				   I915_READ(RING_PP_DIR_BASE(engine)));
+			seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+				   I915_READ(RING_PP_DIR_BASE_READ(engine)));
+			seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+				   I915_READ(RING_PP_DIR_DCLV(engine)));
+		}
+
+		spin_lock_irq(&b->lock);
+		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+			struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+			seq_printf(m, "\t%s [%d] waiting for %x\n",
+				   w->tsk->comm, w->tsk->pid, w->seqno);
+		}
+		spin_unlock_irq(&b->lock);
+
+		seq_puts(m, "\n");
+	}
+
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
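/*
 * Illustrative sketch, not part of the patch: the CSB dump above treats
 * the context-status buffer as a ring of GEN8_CSB_ENTRIES slots.
 * Unwrapping the write pointer lets one "read < write" loop cover the
 * wrapped case too; e.g. with read = 4 and write = 1 the loop below
 * visits slots 5, 0 and 1.
 */
static inline void example_walk_csb(unsigned int read, unsigned int write)
{
	if (read > write)
		write += GEN8_CSB_ENTRIES;	/* unwrap */

	while (read < write) {
		unsigned int idx = ++read % GEN8_CSB_ENTRIES;

		(void)idx;	/* a real consumer would read CSB[idx] */
	}
}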
+
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3147,7 +3313,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
-		for_each_engine_id(engine, dev_priv, id) {
+		for_each_engine(engine, dev_priv, id) {
 			uint64_t offset;
 
 			seq_printf(m, "%s\n", engine->name);
@@ -3172,22 +3338,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		kunmap_atomic(seqno);
 	} else {
 		seq_puts(m, "  Last signal:");
-		for_each_engine(engine, dev_priv)
+		for_each_engine(engine, dev_priv, id)
 			for (j = 0; j < num_rings; j++)
 				seq_printf(m, "0x%08x\n",
 					   I915_READ(engine->semaphore.mbox.signal[j]));
 		seq_putc(m, '\n');
 	}
 
-	seq_puts(m, "\nSync seqno:\n");
-	for_each_engine(engine, dev_priv) {
-		for (j = 0; j < num_rings; j++)
-			seq_printf(m, "  0x%08x ",
-				   engine->semaphore.sync_seqno[j]);
-		seq_putc(m, '\n');
-	}
-	seq_putc(m, '\n');
-
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
@@ -3236,7 +3393,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-	for_each_engine_id(engine, dev_priv, id)
+	for_each_engine(engine, dev_priv, id)
 		seq_printf(m, "HW whitelist count for %s: %d\n",
 			   engine->name, workarounds->hw_whitelist_count[id]);
 	for (i = 0; i < workarounds->count; ++i) {
@@ -3280,7 +3437,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 	for_each_pipe(dev_priv, pipe) {
 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
 
-		for_each_plane(dev_priv, pipe, plane) {
+		for_each_universal_plane(dev_priv, pipe, plane) {
 			entry = &ddb->plane[pipe][plane];
 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
 				   entry->start, entry->end,
@@ -3914,8 +4071,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
 					bool enable)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
 	struct intel_crtc_state *pipe_config;
 	struct drm_atomic_state *state;
 	int ret = 0;
@@ -3941,10 +4097,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
 
 	ret = drm_atomic_commit(state);
 out:
-	drm_modeset_unlock_all(dev);
 	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
-	if (ret)
-		drm_atomic_state_free(state);
+	drm_modeset_unlock_all(dev);
+	drm_atomic_state_put(state);
 }
 
 static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -3982,10 +4137,8 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
 			       enum pipe pipe,
 			       enum intel_pipe_crc_source source)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-	struct intel_crtc *crtc =
-			to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	enum intel_display_power_domain power_domain;
 	u32 val = 0; /* shut up gcc */
 	int ret;
@@ -4056,15 +4209,15 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
 	/* real source -> none transition */
 	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
 		struct intel_pipe_crc_entry *entries;
-		struct intel_crtc *crtc =
-			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+								  pipe);
 
 		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
 				 pipe_name(pipe));
 
 		drm_modeset_lock(&crtc->base.mutex, NULL);
 		if (crtc->base.state->active)
-			intel_wait_for_vblank(dev, pipe);
+			intel_wait_for_vblank(dev_priv, pipe);
 		drm_modeset_unlock(&crtc->base.mutex);
 
 		spin_lock_irq(&pipe_crc->lock);
@@ -4463,7 +4616,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 	else if (IS_VALLEYVIEW(dev_priv))
 		num_levels = 1;
 	else
-		num_levels = ilk_wm_max_level(dev) + 1;
+		num_levels = ilk_wm_max_level(dev_priv) + 1;
 
 	drm_modeset_lock_all(dev);
 
@@ -4579,7 +4732,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 	else if (IS_VALLEYVIEW(dev_priv))
 		num_levels = 1;
 	else
-		num_levels = ilk_wm_max_level(dev) + 1;
+		num_levels = ilk_wm_max_level(dev_priv) + 1;
 
 	if (len >= sizeof(tmp))
 		return -EINVAL;
@@ -4704,13 +4857,9 @@ i915_wedged_set(void *data, u64 val)
 	if (i915_reset_in_progress(&dev_priv->gpu_error))
 		return -EAGAIN;
 
-	intel_runtime_pm_get(dev_priv);
-
 	i915_handle_error(dev_priv, val,
 			  "Manually setting wedged to %llu", val);
 
-	intel_runtime_pm_put(dev_priv);
-
 	return 0;
 }
 
@@ -4778,10 +4927,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
 #define DROP_BOUND 0x2
 #define DROP_RETIRE 0x4
 #define DROP_ACTIVE 0x8
-#define DROP_ALL (DROP_UNBOUND | \
-		  DROP_BOUND | \
-		  DROP_RETIRE | \
-		  DROP_ACTIVE)
+#define DROP_FREED 0x10
+#define DROP_ALL (DROP_UNBOUND	| \
+		  DROP_BOUND	| \
+		  DROP_RETIRE	| \
+		  DROP_ACTIVE	| \
+		  DROP_FREED)
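/*
 * Aside, not part of the patch: the DROP_* bits form a mask written to
 * the i915_gem_drop_caches debugfs file; with DROP_FREED added,
 * DROP_ALL becomes 0x1f, so flushing everything is
 *
 *	# echo 0x1f > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * and DROP_FREED alone (0x10) just drains the deferred, RCU-delayed
 * object-free work, as the synchronize_rcu()/flush_work() pair added
 * below implements.
 */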
 static int
 i915_drop_caches_get(void *data, u64 *val)
 {
@@ -4825,6 +4976,11 @@ i915_drop_caches_set(void *data, u64 val)
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 
+	if (val & DROP_FREED) {
+		synchronize_rcu();
+		flush_work(&dev_priv->mm.free_work);
+	}
+
 	return ret;
 }
 
@@ -4945,22 +5101,16 @@ static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
 	u32 snpcr;
-	int ret;
 
 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
 		return -ENODEV;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);
 
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5275,7 +5425,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
 	{"i915_context_status", i915_context_status, 0},
 	{"i915_dump_lrc", i915_dump_lrc, 0},
-	{"i915_execlists", i915_execlists, 0},
 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -5287,6 +5436,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_dmc_info", i915_dmc_info, 0},
 	{"i915_display_info", i915_display_info, 0},
+	{"i915_engine_info", i915_engine_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
@@ -5309,7 +5459,9 @@ static const struct i915_debugfs_files {
 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 	{"i915_error_state", &i915_error_state_fops},
+#endif
 	{"i915_next_seqno", &i915_next_seqno_fops},
 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
@@ -5318,7 +5470,8 @@ static const struct i915_debugfs_files {
 	{"i915_fbc_false_color", &i915_fbc_fc_fops},
 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
-	{"i915_dp_test_active", &i915_displayport_test_active_fops}
+	{"i915_dp_test_active", &i915_displayport_test_active_fops},
+	{"i915_guc_log_control", &i915_guc_log_control_fops}
 };
 
 void intel_display_crc_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bfb2efd..b72c24f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -114,7 +114,7 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
 		      fmt, ##__VA_ARGS__)
 
 
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
 {
 	enum intel_pch ret = PCH_NOP;
 
@@ -125,16 +125,16 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
 	 * make an educated guess as to which PCH is really there.
 	 */
 
-	if (IS_GEN5(dev)) {
+	if (IS_GEN5(dev_priv)) {
 		ret = PCH_IBX;
 		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
-	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
 		ret = PCH_CPT;
 		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
-	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		ret = PCH_LPT;
 		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
-	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		ret = PCH_SPT;
 		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
 	}
@@ -174,40 +174,46 @@ static void intel_detect_pch(struct drm_device *dev)
 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_IBX;
 				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-				WARN_ON(!IS_GEN5(dev));
+				WARN_ON(!IS_GEN5(dev_priv));
 			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_CPT;
 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+				WARN_ON(!(IS_GEN6(dev_priv) ||
+					IS_IVYBRIDGE(dev_priv)));
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
 				dev_priv->pch_type = PCH_CPT;
 				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+				WARN_ON(!(IS_GEN6(dev_priv) ||
+					IS_IVYBRIDGE(dev_priv)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
-				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+				WARN_ON(!IS_HASWELL(dev_priv) &&
+					!IS_BROADWELL(dev_priv));
+				WARN_ON(IS_HSW_ULT(dev_priv) ||
+					IS_BDW_ULT(dev_priv));
 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
-				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+				WARN_ON(!IS_HASWELL(dev_priv) &&
+					!IS_BROADWELL(dev_priv));
+				WARN_ON(!IS_HSW_ULT(dev_priv) &&
+					!IS_BDW_ULT(dev_priv));
 			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-				WARN_ON(!IS_SKYLAKE(dev) &&
-					!IS_KABYLAKE(dev));
+				WARN_ON(!IS_SKYLAKE(dev_priv) &&
+					!IS_KABYLAKE(dev_priv));
 			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-				WARN_ON(!IS_SKYLAKE(dev) &&
-					!IS_KABYLAKE(dev));
+				WARN_ON(!IS_SKYLAKE(dev_priv) &&
+					!IS_KABYLAKE(dev_priv));
 			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_KBP;
 				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
-				WARN_ON(!IS_KABYLAKE(dev));
+				WARN_ON(!IS_KABYLAKE(dev_priv));
 			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
 				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
 				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -215,7 +221,8 @@ static void intel_detect_pch(struct drm_device *dev)
 					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
 				    pch->subsystem_device ==
 					    PCI_SUBDEVICE_ID_QEMU)) {
-				dev_priv->pch_type = intel_virt_detect_pch(dev);
+				dev_priv->pch_type =
+					intel_virt_detect_pch(dev_priv);
 			} else
 				continue;
 
@@ -255,16 +262,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = dev_priv->overlay ? 1 : 0;
 		break;
 	case I915_PARAM_HAS_BSD:
-		value = intel_engine_initialized(&dev_priv->engine[VCS]);
+		value = !!dev_priv->engine[VCS];
 		break;
 	case I915_PARAM_HAS_BLT:
-		value = intel_engine_initialized(&dev_priv->engine[BCS]);
+		value = !!dev_priv->engine[BCS];
 		break;
 	case I915_PARAM_HAS_VEBOX:
-		value = intel_engine_initialized(&dev_priv->engine[VECS]);
+		value = !!dev_priv->engine[VECS];
 		break;
 	case I915_PARAM_HAS_BSD2:
-		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+		value = !!dev_priv->engine[VCS2];
 		break;
 	case I915_PARAM_HAS_EXEC_CONSTANTS:
 		value = INTEL_GEN(dev_priv) >= 4;
@@ -417,12 +424,12 @@ intel_setup_mchbar(struct drm_device *dev)
 	u32 temp;
 	bool enabled;
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return;
 
 	dev_priv->mchbar_need_disable = false;
 
-	if (IS_I915G(dev) || IS_I915GM(dev)) {
+	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
 		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
 		enabled = !!(temp & DEVEN_MCHBAR_EN);
 	} else {
@@ -440,7 +447,7 @@ intel_setup_mchbar(struct drm_device *dev)
 	dev_priv->mchbar_need_disable = true;
 
 	/* Space is allocated or reserved, so enable it. */
-	if (IS_I915G(dev) || IS_I915GM(dev)) {
+	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
 		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
 				       temp | DEVEN_MCHBAR_EN);
 	} else {
@@ -456,7 +463,7 @@ intel_teardown_mchbar(struct drm_device *dev)
 	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 
 	if (dev_priv->mchbar_need_disable) {
-		if (IS_I915G(dev) || IS_I915GM(dev)) {
+		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
 			u32 deven_val;
 
 			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
@@ -530,40 +537,17 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 	.can_switch = i915_switcheroo_can_switch,
 };
 
-static void i915_gem_fini(struct drm_device *dev)
+static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_gem_cleanup_engines(&dev_priv->drm);
+	i915_gem_context_fini(&dev_priv->drm);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	/*
-	 * Neither the BIOS, ourselves or any other kernel
-	 * expects the system to be in execlists mode on startup,
-	 * so we need to reset the GPU back to legacy mode. And the only
-	 * known way to disable logical contexts is through a GPU reset.
-	 *
-	 * So in order to leave the system in a known default configuration,
-	 * always reset the GPU upon unload. Afterwards we then clean up the
-	 * GEM state tracking, flushing off the requests and leaving the
-	 * system in a known idle state.
-	 *
-	 * Note that is of the upmost importance that the GPU is idle and
-	 * all stray writes are flushed *before* we dismantle the backing
-	 * storage for the pinned objects.
-	 *
-	 * However, since we are uncertain that reseting the GPU on older
-	 * machines is a good idea, we don't - just in case it leaves the
-	 * machine in an unusable condition.
-	 */
-	if (HAS_HW_CONTEXTS(dev)) {
-		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
-		WARN_ON(reset && reset != -ENODEV);
-	}
+	rcu_barrier();
+	flush_work(&dev_priv->mm.free_work);
 
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_engines(dev);
-	i915_gem_context_fini(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	WARN_ON(!list_empty(&to_i915(dev)->context_list));
+	WARN_ON(!list_empty(&dev_priv->context_list));
 }
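
The new rcu_barrier()/flush_work() pair drains the deferred object-free machinery this patch introduces (see the free_list/free_work fields added in i915_drv.h further down). A hedged sketch of the kind of worker being flushed here; the body is illustrative, not the actual implementation:

	static void example_free_worker(struct work_struct *work)
	{
		struct drm_i915_private *i915 =
			container_of(work, struct drm_i915_private, mm.free_work);
		struct llist_node *freed;
		struct drm_i915_gem_object *obj, *on;

		/* Claim everything queued so far by the RCU callbacks. */
		freed = llist_del_all(&i915->mm.free_list);
		llist_for_each_entry_safe(obj, on, freed, freed) {
			/* release obj's backing storage and free obj here */
		}
	}

Hence the ordering in i915_gem_fini(): rcu_barrier() first, so pending RCU callbacks have queued their objects, then flush_work() to reap them.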
 
 static int i915_load_modeset_init(struct drm_device *dev)
@@ -611,7 +595,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
-	intel_modeset_init(dev);
+	ret = intel_modeset_init(dev);
+	if (ret)
+		goto cleanup_irq;
 
 	intel_guc_init(dev);
 
@@ -636,7 +622,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	return 0;
 
 cleanup_gem:
-	i915_gem_fini(dev);
+	if (i915_gem_suspend(dev))
+		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+	i915_gem_fini(dev_priv);
 cleanup_irq:
 	intel_guc_fini(dev);
 	drm_irq_uninstall(dev);
@@ -771,6 +759,19 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 	destroy_workqueue(dev_priv->wq);
 }
 
+/*
+ * We don't keep the workarounds for pre-production hardware, so we expect our
+ * driver to fail on these machines in one way or another. A little warning on
+ * dmesg may help both the user and the bug triagers.
+ */
+static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+{
+	if (IS_HSW_EARLY_SDV(dev_priv) ||
+	    IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+		DRM_ERROR("This is a pre-production stepping. "
+			  "It may not be fully functional.\n");
+}
+
 /**
  * i915_driver_init_early - setup state not requiring device access
  * @dev_priv: device private
@@ -829,25 +830,24 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	intel_init_dpio(dev_priv);
 	intel_power_domains_init(dev_priv);
 	intel_irq_init(dev_priv);
+	intel_hangcheck_init(dev_priv);
 	intel_init_display_hooks(dev_priv);
 	intel_init_clock_gating_hooks(dev_priv);
 	intel_init_audio_hooks(dev_priv);
-	i915_gem_load_init(&dev_priv->drm);
+	ret = i915_gem_load_init(&dev_priv->drm);
+	if (ret < 0)
+		goto err_gvt;
 
 	intel_display_crc_init(dev_priv);
 
 	intel_device_info_dump(dev_priv);
 
-	/* Not all pre-production machines fall into this category, only the
-	 * very first ones. Almost everything should work, except for maybe
-	 * suspend/resume. And we don't implement workarounds that affect only
-	 * pre-production machines. */
-	if (IS_HSW_EARLY_SDV(dev_priv))
-		DRM_INFO("This is an early pre-production Haswell machine. "
-			 "It may not be fully functional.\n");
+	intel_detect_preproduction_hw(dev_priv);
 
 	return 0;
 
+err_gvt:
+	intel_gvt_cleanup(dev_priv);
 err_workqueues:
 	i915_workqueues_cleanup(dev_priv);
 	return ret;
@@ -870,7 +870,7 @@ static int i915_mmio_setup(struct drm_device *dev)
 	int mmio_bar;
 	int mmio_size;
 
-	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
 	/*
 	 * Before gen4, the registers and the GTT are behind different BARs.
 	 * However, from gen4 onwards, the registers and the GTT are shared
@@ -982,7 +982,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
-	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	if (i915_inject_load_failure())
@@ -1023,7 +1022,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	pci_set_master(pdev);
 
 	/* overlay on gen2 is broken and can't address above 1G */
-	if (IS_GEN2(dev)) {
+	if (IS_GEN2(dev_priv)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
@@ -1040,7 +1039,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	 * behaviour if any general state is accessed within a page above 4GB,
 	 * which also needs to be handled carefully.
 	 */
-	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+	if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 		if (ret) {
@@ -1070,7 +1069,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	 * be lost or delayed, but we use them anyways to avoid
 	 * stuck interrupts on some machines.
 	 */
-	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+	if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
 		if (pci_enable_msi(pdev) < 0)
 			DRM_DEBUG_DRIVER("can't enable MSI");
 	}
@@ -1121,6 +1120,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	/* Reveal our presence to userspace */
 	if (drm_dev_register(dev, 0) == 0) {
 		i915_debugfs_register(dev_priv);
+		i915_guc_register(dev_priv);
 		i915_setup_sysfs(dev_priv);
 	} else
 		DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1159,6 +1159,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	intel_opregion_unregister(dev_priv);
 
 	i915_teardown_sysfs(dev_priv);
+	i915_guc_unregister(dev_priv);
 	i915_debugfs_unregister(dev_priv);
 	drm_dev_unregister(&dev_priv->drm);
 
@@ -1242,6 +1243,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver.name, driver.major, driver.minor, driver.patchlevel,
 		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+		DRM_INFO("DRM_I915_DEBUG enabled\n");
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -1309,7 +1314,7 @@ void i915_driver_unload(struct drm_device *dev)
 	drain_workqueue(dev_priv->wq);
 
 	intel_guc_fini(dev);
-	i915_gem_fini(dev);
+	i915_gem_fini(dev_priv);
 	intel_fbc_cleanup_cfb(dev_priv);
 
 	intel_power_domains_fini(dev_priv);
@@ -1431,7 +1436,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	intel_suspend_encoders(dev_priv);
 
-	intel_suspend_hw(dev);
+	intel_suspend_hw(dev_priv);
 
 	i915_gem_suspend_gtt_mappings(dev);
 
@@ -1447,8 +1452,6 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	dev_priv->suspend_count++;
 
-	intel_display_set_init_power(dev_priv, false);
-
 	intel_csr_ucode_suspend(dev_priv);
 
 out:
@@ -1466,6 +1469,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
+	intel_display_set_init_power(dev_priv, false);
+
 	fw_csr = !IS_BROXTON(dev_priv) &&
 		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
 	/*
@@ -1595,6 +1600,8 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	intel_display_resume(dev);
 
+	drm_kms_helper_poll_enable(dev);
+
 	/*
 	 * ... but also need to make sure that hotplug processing
 	 * doesn't cause havoc. Like in the driver load code we don't
@@ -1602,8 +1609,6 @@ static int i915_drm_resume(struct drm_device *dev)
 	 * notifications.
 	 * */
 	intel_hpd_init(dev_priv);
-	/* Config may have changed between suspend and resume */
-	drm_helper_hpd_irq_event(dev);
 
 	intel_opregion_register(dev_priv);
 
@@ -1616,7 +1621,6 @@ static int i915_drm_resume(struct drm_device *dev)
 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
 	intel_autoenable_gt_powersave(dev_priv);
-	drm_kms_helper_poll_enable(dev);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 
@@ -1721,6 +1725,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
 	return i915_drm_resume(dev);
 }
 
+static void disable_engines_irq(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/* Ensure the irq handler finishes and is not run again. */
+	disable_irq(dev_priv->drm.irq);
+	for_each_engine(engine, dev_priv, id)
+		tasklet_kill(&engine->irq_tasklet);
+}
+
+static void enable_engines_irq(struct drm_i915_private *dev_priv)
+{
+	enable_irq(dev_priv->drm.irq);
+}
+
 /**
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
@@ -1754,7 +1774,11 @@ void i915_reset(struct drm_i915_private *dev_priv)
 	error->reset_count++;
 
 	pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
+	disable_engines_irq(dev_priv);
 	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
+	enable_engines_irq(dev_priv);
+
 	if (ret) {
 		if (ret != -ENODEV)
 			DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -2240,7 +2264,6 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 				bool rpm_resume)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	int err;
 	int ret;
 
@@ -2264,10 +2287,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 
 	vlv_check_no_gt_access(dev_priv);
 
-	if (rpm_resume) {
-		intel_init_clock_gating(dev);
-		i915_gem_restore_fences(dev);
-	}
+	if (rpm_resume)
+		intel_init_clock_gating(dev_priv);
 
 	return ret;
 }
@@ -2282,37 +2303,18 @@ static int intel_runtime_suspend(struct device *kdev)
 	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
 		return -ENODEV;
 
-	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
 		return -ENODEV;
 
 	DRM_DEBUG_KMS("Suspending device\n");
 
-	/*
-	 * We could deadlock here in case another thread holding struct_mutex
-	 * calls RPM suspend concurrently, since the RPM suspend will wait
-	 * first for this RPM suspend to finish. In this case the concurrent
-	 * RPM resume will be followed by its RPM suspend counterpart. Still
-	 * for consistency return -EAGAIN, which will reschedule this suspend.
-	 */
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
-		/*
-		 * Bump the expiration timestamp, otherwise the suspend won't
-		 * be rescheduled.
-		 */
-		pm_runtime_mark_last_busy(kdev);
-
-		return -EAGAIN;
-	}
-
 	disable_rpm_wakeref_asserts(dev_priv);
 
 	/*
 	 * We are safe here against re-faults, since the fault handler takes
 	 * an RPM reference.
 	 */
-	i915_gem_release_all_mmaps(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_runtime_suspend(dev_priv);
 
 	intel_guc_suspend(dev);
 
@@ -2386,7 +2388,7 @@ static int intel_runtime_resume(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret = 0;
 
-	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
 		return -ENODEV;
 
 	DRM_DEBUG_KMS("Resuming device\n");
@@ -2404,7 +2406,7 @@ static int intel_runtime_resume(struct device *kdev)
 	if (IS_GEN6(dev_priv))
 		intel_init_pch_refclk(dev);
 
-	if (IS_BROXTON(dev)) {
+	if (IS_BROXTON(dev_priv)) {
 		bxt_disable_dc9(dev_priv);
 		bxt_display_core_init(dev_priv, true);
 		if (dev_priv->csr.dmc_payload &&
@@ -2577,7 +2579,7 @@ static struct drm_driver driver = {
 	.set_busid = drm_pci_set_busid,
 
 	.gem_close_object = i915_gem_close_object,
-	.gem_free_object = i915_gem_free_object,
+	.gem_free_object_unlocked = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8b9ee4e..91bec60 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -41,6 +41,7 @@
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
+#include <linux/reservation.h>
 #include <linux/shmem_fs.h>
 
 #include <drm/drmP.h>
@@ -62,6 +63,7 @@
 #include "i915_gem_gtt.h"
 #include "i915_gem_render_state.h"
 #include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
 
 #include "intel_gvt.h"
 
@@ -70,7 +72,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160919"
+#define DRIVER_DATE		"20161108"
+#define DRIVER_TIMESTAMP	1478587895
 
 #undef WARN_ON
 /* Many gcc seem to not see through this and fall over :( */
@@ -182,9 +185,10 @@ enum plane {
 };
 #define plane_name(p) ((p) + 'A')
 
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
 
 enum port {
+	PORT_NONE = -1,
 	PORT_A = 0,
 	PORT_B,
 	PORT_C,
@@ -310,7 +314,7 @@ struct i915_hotplug {
 #define for_each_pipe_masked(__dev_priv, __p, __mask) \
 	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
 		for_each_if ((__mask) & (1 << (__p)))
-#define for_each_plane(__dev_priv, __pipe, __p)				\
+#define for_each_universal_plane(__dev_priv, __pipe, __p)		\
 	for ((__p) = 0;							\
 	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\
 	     (__p)++)
@@ -490,8 +494,8 @@ struct intel_limit;
 struct dpll;
 
 struct drm_i915_display_funcs {
-	int (*get_display_clock_speed)(struct drm_device *dev);
-	int (*get_fifo_size)(struct drm_device *dev, int plane);
+	int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
+	int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
 	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
 	int (*compute_intermediate_wm)(struct drm_device *dev,
 				       struct intel_crtc *intel_crtc,
@@ -499,7 +503,7 @@ struct drm_i915_display_funcs {
 	void (*initial_watermarks)(struct intel_crtc_state *cstate);
 	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
 	int (*compute_global_watermarks)(struct drm_atomic_state *state);
-	void (*update_wm)(struct drm_crtc *crtc);
+	void (*update_wm)(struct intel_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
@@ -521,7 +525,7 @@ struct drm_i915_display_funcs {
 				   const struct drm_display_mode *adjusted_mode);
 	void (*audio_codec_disable)(struct intel_encoder *encoder);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
-	void (*init_clock_gating)(struct drm_device *dev);
+	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj,
@@ -581,13 +585,25 @@ struct intel_uncore_funcs {
 				uint32_t val, bool trace);
 };
 
+struct intel_forcewake_range {
+	u32 start;
+	u32 end;
+
+	enum forcewake_domains domains;
+};
+
 struct intel_uncore {
 	spinlock_t lock; /** lock is also taken in irq contexts. */
 
+	const struct intel_forcewake_range *fw_domains_table;
+	unsigned int fw_domains_table_entries;
+
 	struct intel_uncore_funcs funcs;
 
 	unsigned fifo_count;
+
 	enum forcewake_domains fw_domains;
+	enum forcewake_domains fw_domains_active;
 
 	struct intel_uncore_forcewake_domain {
 		struct drm_i915_private *i915;
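
fw_domains_table is a range table consulted on register access to decide which forcewake domains to wake. A hedged linear-scan sketch of such a lookup; the real logic lives in intel_uncore.c (not shown in this hunk) and may well search differently:

	static enum forcewake_domains
	example_find_fw_domains(const struct intel_uncore *uncore, u32 offset)
	{
		unsigned int i;

		for (i = 0; i < uncore->fw_domains_table_entries; i++) {
			const struct intel_forcewake_range *entry =
				&uncore->fw_domains_table[i];

			if (offset >= entry->start && offset <= entry->end)
				return entry->domains;
		}

		return 0;	/* no forcewake needed for this register */
	}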
@@ -633,54 +649,54 @@ struct intel_csr {
 	uint32_t allowed_dc_mask;
 };
 
-#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
-	func(is_mobile) sep \
-	func(is_i85x) sep \
-	func(is_i915g) sep \
-	func(is_i945gm) sep \
-	func(is_g33) sep \
-	func(hws_needs_physical) sep \
-	func(is_g4x) sep \
-	func(is_pineview) sep \
-	func(is_broadwater) sep \
-	func(is_crestline) sep \
-	func(is_ivybridge) sep \
-	func(is_valleyview) sep \
-	func(is_cherryview) sep \
-	func(is_haswell) sep \
-	func(is_broadwell) sep \
-	func(is_skylake) sep \
-	func(is_broxton) sep \
-	func(is_kabylake) sep \
-	func(is_preliminary) sep \
-	func(has_fbc) sep \
-	func(has_psr) sep \
-	func(has_runtime_pm) sep \
-	func(has_csr) sep \
-	func(has_resource_streamer) sep \
-	func(has_rc6) sep \
-	func(has_rc6p) sep \
-	func(has_dp_mst) sep \
-	func(has_gmbus_irq) sep \
-	func(has_hw_contexts) sep \
-	func(has_logical_ring_contexts) sep \
-	func(has_l3_dpf) sep \
-	func(has_gmch_display) sep \
-	func(has_guc) sep \
-	func(has_pipe_cxsr) sep \
-	func(has_hotplug) sep \
-	func(cursor_needs_physical) sep \
-	func(has_overlay) sep \
-	func(overlay_needs_physical) sep \
-	func(supports_tv) sep \
-	func(has_llc) sep \
-	func(has_snoop) sep \
-	func(has_ddi) sep \
-	func(has_fpga_dbg) sep \
-	func(has_pooled_eu)
-
-#define DEFINE_FLAG(name) u8 name:1
-#define SEP_SEMICOLON ;
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+	/* Keep is_* in chronological order */ \
+	func(is_mobile); \
+	func(is_i85x); \
+	func(is_i915g); \
+	func(is_i945gm); \
+	func(is_g33); \
+	func(is_g4x); \
+	func(is_pineview); \
+	func(is_broadwater); \
+	func(is_crestline); \
+	func(is_ivybridge); \
+	func(is_valleyview); \
+	func(is_cherryview); \
+	func(is_haswell); \
+	func(is_broadwell); \
+	func(is_skylake); \
+	func(is_broxton); \
+	func(is_kabylake); \
+	func(is_preliminary); \
+	/* Keep has_* in alphabetical order */ \
+	func(has_64bit_reloc); \
+	func(has_csr); \
+	func(has_ddi); \
+	func(has_dp_mst); \
+	func(has_fbc); \
+	func(has_fpga_dbg); \
+	func(has_gmbus_irq); \
+	func(has_gmch_display); \
+	func(has_guc); \
+	func(has_hotplug); \
+	func(has_hw_contexts); \
+	func(has_l3_dpf); \
+	func(has_llc); \
+	func(has_logical_ring_contexts); \
+	func(has_overlay); \
+	func(has_pipe_cxsr); \
+	func(has_pooled_eu); \
+	func(has_psr); \
+	func(has_rc6); \
+	func(has_rc6p); \
+	func(has_resource_streamer); \
+	func(has_runtime_pm); \
+	func(has_snoop); \
+	func(cursor_needs_physical); \
+	func(hws_needs_physical); \
+	func(overlay_needs_physical); \
+	func(supports_tv)
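
With the separator folded into the macro, each func(name) now expands as a ';'-terminated struct field or statement. Besides the DEFINE_FLAG use below, the same shape suits a statement-per-flag dumper; a sketch in the spirit of the debugfs capability dump, with the function name illustrative:

	static void example_dump_flags(struct seq_file *m,
				       const struct intel_device_info *info)
	{
	#define PRINT_FLAG(name) seq_printf(m, #name ": %s\n", yesno(info->name))
		DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
	#undef PRINT_FLAG
	}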
 
 struct sseu_dev_info {
 	u8 slice_mask;
@@ -709,7 +725,9 @@ struct intel_device_info {
 	u16 gen_mask;
 	u8 ring_mask; /* Rings supported by the HW */
 	u8 num_rings;
-	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+#define DEFINE_FLAG(name) u8 name:1
+	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
 	u16 ddb_size; /* in blocks */
 	/* Register offsets for the various display pipes and transcoders */
 	int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -726,14 +744,15 @@ struct intel_device_info {
 	} color;
 };
 
-#undef DEFINE_FLAG
-#undef SEP_SEMICOLON
-
 struct intel_display_error_state;
 
 struct drm_i915_error_state {
 	struct kref ref;
 	struct timeval time;
+	struct timeval boottime;
+	struct timeval uptime;
+
+	struct drm_i915_private *i915;
 
 	char error_msg[128];
 	bool simulated;
@@ -759,11 +778,12 @@ struct drm_i915_error_state {
 	u32 gam_ecochk;
 	u32 gab_ctl;
 	u32 gfx_mode;
-	u32 extra_instdone[I915_NUM_INSTDONE_REG];
+
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
 	struct drm_i915_error_object *semaphore;
+	struct drm_i915_error_object *guc_log;
 
 	struct drm_i915_error_engine {
 		int engine_id;
@@ -775,12 +795,14 @@ struct drm_i915_error_state {
 		struct i915_address_space *vm;
 		int num_requests;
 
+		/* position of active request inside the ring */
+		u32 rq_head, rq_post, rq_tail;
+
 		/* our own tracking of ring head and tail */
 		u32 cpu_ring_head;
 		u32 cpu_ring_tail;
 
 		u32 last_seqno;
-		u32 semaphore_seqno[I915_NUM_ENGINES - 1];
 
 		/* Register state */
 		u32 start;
@@ -791,7 +813,6 @@ struct drm_i915_error_state {
 		u32 hws;
 		u32 ipeir;
 		u32 ipehr;
-		u32 instdone;
 		u32 bbstate;
 		u32 instpm;
 		u32 instps;
@@ -802,11 +823,13 @@ struct drm_i915_error_state {
 		u64 faddr;
 		u32 rc_psmi; /* sleep state */
 		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+		struct intel_instdone instdone;
 
 		struct drm_i915_error_object {
-			int page_count;
 			u64 gtt_offset;
 			u64 gtt_size;
+			int page_count;
+			int unused;
 			u32 *pages[0];
 		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 
@@ -815,10 +838,11 @@ struct drm_i915_error_state {
 		struct drm_i915_error_request {
 			long jiffies;
 			pid_t pid;
+			u32 context;
 			u32 seqno;
 			u32 head;
 			u32 tail;
-		} *requests;
+		} *requests, execlist[2];
 
 		struct drm_i915_error_waiter {
 			char comm[TASK_COMM_LEN];
@@ -914,6 +938,7 @@ struct i915_gem_context {
 	struct drm_i915_file_private *file_priv;
 	struct i915_hw_ppgtt *ppgtt;
 	struct pid *pid;
+	const char *name;
 
 	struct i915_ctx_hang_stats hang_stats;
 
@@ -972,6 +997,9 @@ struct intel_fbc {
 	bool enabled;
 	bool active;
 
+	bool underrun_detected;
+	struct work_struct underrun_work;
+
 	struct intel_fbc_state_cache {
 		struct {
 			unsigned int mode_flags;
@@ -1297,6 +1325,12 @@ struct i915_power_well {
 	/* cached hw enabled state */
 	bool hw_enabled;
 	unsigned long domains;
+	/* unique identifier for this power well */
+	unsigned long id;
+	/*
+	 * Arbitrary data associated with this power well. Platform and power
+	 * well specific.
+	 */
 	unsigned long data;
 	const struct i915_power_well_ops *ops;
 };
@@ -1334,11 +1368,22 @@ struct i915_gem_mm {
 	struct list_head bound_list;
 	/**
 	 * List of objects which are not bound to the GTT (thus
-	 * are idle and not used by the GPU) but still have
-	 * (presumably uncached) pages still attached.
+	 * are idle and not used by the GPU). These objects may or may
+	 * not actually have any pages attached.
 	 */
 	struct list_head unbound_list;
 
+	/** List of all objects in gtt_space, currently mmaped by userspace.
+	 * All objects within this list must also be on bound_list.
+	 */
+	struct list_head userfault_list;
+
+	/**
+	 * List of objects which are pending destruction.
+	 */
+	struct llist_head free_list;
+	struct work_struct free_work;
+
 	/** Usable portion of the GTT for GEM */
 	unsigned long stolen_base; /* limited to low memory (32-bit) */
 
@@ -1368,7 +1413,7 @@ struct i915_gem_mm {
 
 	/* accounting, useful for userland debugging */
 	spinlock_t object_stat_lock;
-	size_t object_memory;
+	u64 object_memory;
 	u32 object_count;
 };
 
@@ -1387,6 +1432,9 @@ struct i915_error_state_file_priv {
 	struct drm_i915_error_state *error;
 };
 
+#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
+#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+
 struct i915_gpu_error {
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1620,7 +1668,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 }
 
 struct skl_ddb_allocation {
-	struct skl_ddb_entry pipe[I915_MAX_PIPES];
 	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
 	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
 };
@@ -1628,15 +1675,12 @@ struct skl_ddb_allocation {
 struct skl_wm_values {
 	unsigned dirty_pipes;
 	struct skl_ddb_allocation ddb;
-	uint32_t wm_linetime[I915_MAX_PIPES];
-	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
-	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
 };
 
 struct skl_wm_level {
-	bool plane_en[I915_MAX_PLANES];
-	uint16_t plane_res_b[I915_MAX_PLANES];
-	uint8_t plane_res_l[I915_MAX_PLANES];
+	bool plane_en;
+	uint16_t plane_res_b;
+	uint8_t plane_res_l;
 };
 
 /*
@@ -1664,7 +1708,6 @@ struct skl_wm_level {
  */
 struct i915_runtime_pm {
 	atomic_t wakeref_count;
-	atomic_t atomic_seq;
 	bool suspended;
 	bool irqs_enabled;
 };
@@ -1759,7 +1802,7 @@ struct drm_i915_private {
 
 	struct i915_virtual_gpu vgpu;
 
-	struct intel_gvt gvt;
+	struct intel_gvt *gvt;
 
 	struct intel_guc guc;
 
@@ -1787,9 +1830,8 @@ struct drm_i915_private {
 
 	struct pci_dev *bridge_dev;
 	struct i915_gem_context *kernel_context;
-	struct intel_engine_cs engine[I915_NUM_ENGINES];
+	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	struct i915_vma *semaphore;
-	u32 next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
@@ -1814,8 +1856,10 @@ struct drm_i915_private {
 		u32 de_irq_mask[I915_MAX_PIPES];
 	};
 	u32 gt_irq_mask;
-	u32 pm_irq_mask;
+	u32 pm_imr;
+	u32 pm_ier;
 	u32 pm_rps_events;
+	u32 pm_guc_events;
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
 	struct i915_hotplug hotplug;
@@ -1892,8 +1936,8 @@ struct drm_i915_private {
 
 	/* Kernel Modesetting */
 
-	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
-	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
+	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
 	wait_queue_head_t pending_flip_queue;
 
 #ifdef CONFIG_DEBUG_FS
@@ -2047,6 +2091,10 @@ struct drm_i915_private {
 		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
+		struct list_head timelines;
+		struct i915_gem_timeline global_timeline;
+		u32 active_requests;
+
 		/**
 		 * Is the GPU currently considered idle, or busy executing
 		 * userspace requests? Whilst idle, we allow runtime power
@@ -2054,7 +2102,6 @@ struct drm_i915_private {
 		 * In order to reduce the effect on performance, there
 		 * is a slight delay before we do so.
 		 */
-		unsigned int active_engines;
 		bool awake;
 
 		/**
@@ -2074,12 +2121,15 @@ struct drm_i915_private {
 		 * off the idle_work.
 		 */
 		struct delayed_work idle_work;
+
+		ktime_t last_init_time;
 	} gt;
 
 	/* perform PHY state sanity checks? */
 	bool chv_phy_assert[2];
 
-	struct intel_encoder *dig_port_map[I915_MAX_PORTS];
+	/* Used to save the pipe-to-encoder mapping for audio */
+	struct intel_encoder *av_enc_map[I915_MAX_PIPES];
 
 	/*
 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
@@ -2103,19 +2153,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 }
 
 /* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, dev_priv__) \
-	for ((engine__) = &(dev_priv__)->engine[0]; \
-	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
-	     (engine__)++) \
-		for_each_if (intel_engine_initialized(engine__))
-
-/* Iterator with engine_id */
-#define for_each_engine_id(engine__, dev_priv__, id__) \
-	for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
-	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
-	     (engine__)++) \
-		for_each_if (((id__) = (engine__)->id, \
-			      intel_engine_initialized(engine__)))
+#define for_each_engine(engine__, dev_priv__, id__) \
+	for ((id__) = 0; \
+	     (id__) < I915_NUM_ENGINES; \
+	     (id__)++) \
+		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
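
The iterator now walks the pointer array and binds both the engine and its id in a single pass, replacing the separate for_each_engine_id() variant. A minimal usage sketch (the logging body is illustrative):

	static void example_log_engines(struct drm_i915_private *dev_priv)
	{
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, dev_priv, id)
			DRM_DEBUG_DRIVER("engine[%d] = %s\n", id, engine->name);
	}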
 
 #define __mask_next_bit(mask) ({					\
 	int __idx = ffs(mask) - 1;					\
@@ -2126,7 +2168,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 /* Iterator over subset of engines selected by mask */
 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
 	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
-	     tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+	     tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
 
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
@@ -2140,6 +2182,7 @@ enum hdmi_force_audio {
 struct drm_i915_gem_object_ops {
 	unsigned int flags;
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+#define I915_GEM_OBJECT_IS_SHRINKABLE   0x2
 
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
@@ -2154,8 +2197,8 @@ struct drm_i915_gem_object_ops {
 	 * being released or under memory pressure (where we attempt to
 	 * reap pages for the shrinker).
 	 */
-	int (*get_pages)(struct drm_i915_gem_object *);
-	void (*put_pages)(struct drm_i915_gem_object *);
+	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
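
get_pages() now returns the sg_table instead of installing it on the object itself; the caller stashes the result under the new obj->mm.lock. A hedged skeleton of a backend honoring the changed contract, with the population step elided:

	static struct sg_table *
	example_get_pages(struct drm_i915_gem_object *obj)
	{
		struct sg_table *st;

		st = kmalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			return ERR_PTR(-ENOMEM);

		if (sg_alloc_table(st, 1, GFP_KERNEL)) {
			kfree(st);
			return ERR_PTR(-ENOMEM);
		}

		/* fill the entries with the object's backing pages here */
		return st;
	}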
@@ -2189,10 +2232,20 @@ struct drm_i915_gem_object {
 
 	/** List of VMAs backed by this object */
 	struct list_head vma_list;
+	struct rb_root vma_tree;
 
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
-	struct list_head global_list;
+	struct list_head global_link;
+	union {
+		struct rcu_head rcu;
+		struct llist_node freed;
+	};
+
+	/**
+	 * Whether the object is currently in the GGTT mmap.
+	 */
+	struct list_head userfault_link;
 
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
@@ -2200,33 +2253,12 @@ struct drm_i915_gem_object {
 	struct list_head batch_pool_link;
 
 	unsigned long flags;
-	/**
-	 * This is set if the object is on the active lists (has pending
-	 * rendering and so a non-zero seqno), and is not set if it i s on
-	 * inactive (ready to be unbound) list.
-	 */
-#define I915_BO_ACTIVE_SHIFT 0
-#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
-#define __I915_BO_ACTIVE(bo) \
-	((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
 
 	/**
-	 * This is set if the object has been written to since last bound
-	 * to the GTT
+	 * Have we taken a reference for the object for incomplete GPU
+	 * activity?
 	 */
-	unsigned int dirty:1;
-
-	/**
-	 * Advice: are the backing pages purgeable?
-	 */
-	unsigned int madv:2;
-
-	/**
-	 * Whether the current gtt mapping needs to be mappable (and isn't just
-	 * mappable by accident). Track pin and fault separate for a more
-	 * accurate mappable working set.
-	 */
-	unsigned int fault_mappable:1;
+#define I915_BO_ACTIVE_REF 0
 
 	/*
 	 * Is the object to be mapped as read-only to the GPU
@@ -2247,15 +2279,41 @@ struct drm_i915_gem_object {
 
 	/** Count of VMA actually bound by this object */
 	unsigned int bind_count;
+	unsigned int active_count;
 	unsigned int pin_display;
 
-	struct sg_table *pages;
-	int pages_pin_count;
-	struct get_page {
-		struct scatterlist *sg;
-		int last;
-	} get_page;
-	void *mapping;
+	struct {
+		struct mutex lock; /* protects the pages and their use */
+		atomic_t pages_pin_count;
+
+		struct sg_table *pages;
+		void *mapping;
+
+		struct i915_gem_object_page_iter {
+			struct scatterlist *sg_pos;
+			unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+			struct radix_tree_root radix;
+			struct mutex lock; /* protects this cache */
+		} get_page;
+
+		/**
+		 * Advice: are the backing pages purgeable?
+		 */
+		unsigned int madv:2;
+
+		/**
+		 * This is set if the object has been written to since the
+		 * pages were last acquired.
+		 */
+		bool dirty:1;
+
+		/**
+		 * This is set if the object has been pinned due to unknown
+		 * swizzling.
+		 */
+		bool quirked:1;
+	} mm;
 
 	/** Breadcrumb of last rendering to the buffer.
 	 * There can only be one writer, but we allow for multiple readers.
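
The page state now lives under its own obj->mm.lock with an atomic pin count, so pinning pages no longer requires struct_mutex. A hedged sketch of the intended discipline; the helper name and elided populate step are illustrative:

	static int example_pin_pages(struct drm_i915_gem_object *obj)
	{
		int err = 0;

		/* Fast path: pages already populated and pinned by another user. */
		if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
			return 0;

		mutex_lock(&obj->mm.lock);
		if (!obj->mm.pages) {
			/* populate via obj->ops->get_pages(), setting err on failure */
		}
		if (!err)
			atomic_inc(&obj->mm.pages_pin_count);
		mutex_unlock(&obj->mm.lock);

		return err;
	}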
@@ -2267,8 +2325,7 @@ struct drm_i915_gem_object {
 	 * read request. This allows for the CPU to read from an active
 	 * buffer by only waiting for the write to complete.
 	 */
-	struct i915_gem_active last_read[I915_NUM_ENGINES];
-	struct i915_gem_active last_write;
+	struct reservation_object *resv;
 
 	/** References from framebuffers, locks out tiling changes. */
 	unsigned long framebuffer_references;
@@ -2279,8 +2336,6 @@ struct drm_i915_gem_object {
 	struct i915_gem_userptr {
 		uintptr_t ptr;
 		unsigned read_only :1;
-		unsigned workers :4;
-#define I915_GEM_USERPTR_MAX_WORKERS 15
 
 		struct i915_mm_struct *mm;
 		struct i915_mmu_object *mmu_object;
@@ -2289,6 +2344,8 @@ struct drm_i915_gem_object {
 
 	/** for phys allocated objects */
 	struct drm_dma_handle *phys_handle;
+
+	struct reservation_object __builtin_resv;
 };
 
 static inline struct drm_i915_gem_object *
@@ -2300,10 +2357,38 @@ to_intel_bo(struct drm_gem_object *gem)
 	return container_of(gem, struct drm_i915_gem_object, base);
 }
 
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @filp: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @filp, NULL
+ * otherwise. This object is only valid whilst under the RCU read lock, and
+ * note carefully the object may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+	return idr_find(&file->object_idr, handle);
+}
+
 static inline struct drm_i915_gem_object *
 i915_gem_object_lookup(struct drm_file *file, u32 handle)
 {
-	return to_intel_bo(drm_gem_object_lookup(file, handle));
+	struct drm_i915_gem_object *obj;
+
+	rcu_read_lock();
+	obj = i915_gem_object_lookup_rcu(file, handle);
+	if (obj && !kref_get_unless_zero(&obj->base.refcount))
+		obj = NULL;
+	rcu_read_unlock();
+
+	return obj;
 }
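
Handle lookup is thus lock-free: the idr is consulted under RCU and the reference taken with kref_get_unless_zero(), so a concurrently-dying object is simply treated as absent. A small sketch of the RCU-only variant, with the function name illustrative:

	static bool example_handle_exists(struct drm_file *file, u32 handle)
	{
		struct drm_i915_gem_object *obj;
		bool ret;

		rcu_read_lock();
		obj = i915_gem_object_lookup_rcu(file, handle);
		/* obj is only stable inside this RCU read-side section */
		ret = obj != NULL;
		rcu_read_unlock();

		return ret;
	}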
 
 __deprecated
@@ -2325,59 +2410,61 @@ __attribute__((nonnull))
 static inline void
 i915_gem_object_put(struct drm_i915_gem_object *obj)
 {
-	drm_gem_object_unreference(&obj->base);
+	__drm_gem_object_unreference(&obj->base);
 }
 
 __deprecated
 extern void drm_gem_object_unreference(struct drm_gem_object *);
 
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
-{
-	drm_gem_object_unreference_unlocked(&obj->base);
-}
-
 __deprecated
 extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
 
 static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+	return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
 	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
 }
 
-static inline unsigned long
-i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
 {
-	return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
 }
 
 static inline bool
 i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
 {
-	return i915_gem_object_get_active(obj);
-}
-
-static inline void
-i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
-{
-	obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
-}
-
-static inline void
-i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
-{
-	obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+	return obj->active_count;
 }
 
 static inline bool
-i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
-				  int engine)
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
 {
-	return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
 }
 
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
 static inline unsigned int
 i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
 {
@@ -2396,6 +2483,23 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
 	return obj->tiling_and_stride & STRIDE_MASK;
 }
 
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+	struct intel_engine_cs *engine = NULL;
+	struct dma_fence *fence;
+
+	rcu_read_lock();
+	fence = reservation_object_get_excl_rcu(obj->resv);
+	rcu_read_unlock();
+
+	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+		engine = to_request(fence)->engine;
+	dma_fence_put(fence);
+
+	return engine;
+}
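+
+An illustrative caller, e.g. for a debugfs dump; the helper below is a sketch and not part of the patch:
+
+	static void example_show_writer(struct seq_file *m,
+					struct drm_i915_gem_object *obj)
+	{
+		struct intel_engine_cs *engine =
+			i915_gem_object_last_write_engine(obj);
+
+		seq_printf(m, "last write: %s\n", engine ? engine->name : "none");
+	}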
+
 static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
 {
 	i915_gem_object_get(vma->obj);
@@ -2404,7 +2508,6 @@ static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
 
 static inline void i915_vma_put(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 	i915_gem_object_put(vma->obj);
 }
 
@@ -2434,6 +2537,14 @@ static __always_inline struct sgt_iter {
 	return s;
 }
 
+static inline struct scatterlist *____sg_next(struct scatterlist *sg)
+{
+	++sg;
+	if (unlikely(sg_is_chain(sg)))
+		sg = sg_chain_ptr(sg);
+	return sg;
+}
+
 /**
  * __sg_next - return the next scatterlist entry in a list
  * @sg:		The current sg entry
@@ -2448,9 +2559,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 #ifdef CONFIG_DEBUG_SG
 	BUG_ON(sg->sg_magic != SG_MAGIC);
 #endif
-	return sg_is_last(sg) ? NULL :
-		likely(!sg_is_chain(++sg)) ? sg :
-		sg_chain_ptr(sg);
+	return sg_is_last(sg) ? NULL : ____sg_next(sg);
 }
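
____sg_next() factors out the chain hop without the is-last test, for hot loops that already know their bounds; __sg_next() remains the general walker. A minimal walking sketch (function name illustrative):

	static unsigned int example_count_sg(struct sg_table *st)
	{
		struct scatterlist *sg;
		unsigned int count = 0;

		for (sg = st->sgl; sg; sg = __sg_next(sg))
			count++;

		return count;
	}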
 
 /**
@@ -2586,8 +2695,9 @@ struct drm_i915_cmd_table {
 	__p; \
 })
 #define INTEL_INFO(p)	(&__I915__(p)->info)
-#define INTEL_GEN(p)	(INTEL_INFO(p)->gen)
-#define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
+
+#define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
+#define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)
 
 #define REVID_FOREVER		0xff
 #define INTEL_REVID(p)	(__I915__(p)->drm.pdev->revision)
@@ -2598,7 +2708,7 @@ struct drm_i915_cmd_table {
  *
  * Use GEN_FOREVER for unbound start and or end.
  */
-#define IS_GEN(p, s, e) ({ \
+#define IS_GEN(dev_priv, s, e) ({ \
 	unsigned int __s = (s), __e = (e); \
 	BUILD_BUG_ON(!__builtin_constant_p(s)); \
 	BUILD_BUG_ON(!__builtin_constant_p(e)); \
@@ -2608,7 +2718,7 @@ struct drm_i915_cmd_table {
 		__e = BITS_PER_LONG - 1; \
 	else \
 		__e = (e) - 1; \
-	!!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+	!!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
 })
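
The mask form lets one test cover a whole gen range, with GEN_FOREVER leaving either end open. An illustrative wrapper, not part of the patch:

	/* Illustrative only: true on gen6, gen7 and gen8 devices. */
	static inline bool example_is_gen6_to_8(struct drm_i915_private *dev_priv)
	{
		return IS_GEN(dev_priv, 6, 8);
	}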
 
 /*
@@ -2619,73 +2729,73 @@ struct drm_i915_cmd_table {
 #define IS_REVID(p, since, until) \
 	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
-#define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
-#define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
-#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)		(INTEL_DEVID(dev) == 0x2572)
-#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)		(INTEL_DEVID(dev) == 0x2592)
-#define IS_I945G(dev)		(INTEL_DEVID(dev) == 0x2772)
-#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)		(INTEL_DEVID(dev) == 0x2A42)
-#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)	(INTEL_DEVID(dev) == 0xa001)
-#define IS_PINEVIEW_M(dev)	(INTEL_DEVID(dev) == 0xa011)
-#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)	(INTEL_DEVID(dev) == 0x0046)
-#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)		(INTEL_DEVID(dev) == 0x0156 || \
-				 INTEL_DEVID(dev) == 0x0152 || \
-				 INTEL_DEVID(dev) == 0x015a)
-#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
-#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_cherryview)
-#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev)	(INTEL_INFO(dev)->is_broadwell)
-#define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev)		(INTEL_INFO(dev)->is_broxton)
-#define IS_KABYLAKE(dev)	(INTEL_INFO(dev)->is_kabylake)
-#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
-#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
-				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
-				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
-				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
-				 (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_I830(dev_priv)	(INTEL_DEVID(dev_priv) == 0x3577)
+#define IS_845G(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2562)
+#define IS_I85X(dev_priv)	((dev_priv)->info.is_i85x)
+#define IS_I865G(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2572)
+#define IS_I915G(dev_priv)	((dev_priv)->info.is_i915g)
+#define IS_I915GM(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2592)
+#define IS_I945G(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2772)
+#define IS_I945GM(dev_priv)	((dev_priv)->info.is_i945gm)
+#define IS_BROADWATER(dev_priv)	((dev_priv)->info.is_broadwater)
+#define IS_CRESTLINE(dev_priv)	((dev_priv)->info.is_crestline)
+#define IS_GM45(dev_priv)	(INTEL_DEVID(dev_priv) == 0x2A42)
+#define IS_G4X(dev_priv)	((dev_priv)->info.is_g4x)
+#define IS_PINEVIEW_G(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa001)
+#define IS_PINEVIEW_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa011)
+#define IS_PINEVIEW(dev_priv)	((dev_priv)->info.is_pineview)
+#define IS_G33(dev_priv)	((dev_priv)->info.is_g33)
+#define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IVYBRIDGE(dev_priv)	((dev_priv)->info.is_ivybridge)
+#define IS_IVB_GT1(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0156 || \
+				 INTEL_DEVID(dev_priv) == 0x0152 || \
+				 INTEL_DEVID(dev_priv) == 0x015a)
+#define IS_VALLEYVIEW(dev_priv)	((dev_priv)->info.is_valleyview)
+#define IS_CHERRYVIEW(dev_priv)	((dev_priv)->info.is_cherryview)
+#define IS_HASWELL(dev_priv)	((dev_priv)->info.is_haswell)
+#define IS_BROADWELL(dev_priv)	((dev_priv)->info.is_broadwell)
+#define IS_SKYLAKE(dev_priv)	((dev_priv)->info.is_skylake)
+#define IS_BROXTON(dev_priv)	((dev_priv)->info.is_broxton)
+#define IS_KABYLAKE(dev_priv)	((dev_priv)->info.is_kabylake)
+#define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
+#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
+				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
+				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 ||	\
+				 (INTEL_DEVID(dev_priv) & 0xf) == 0xb ||	\
+				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
 /* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev)		(IS_BROADWELL(dev) && \
-				 (INTEL_DEVID(dev) & 0xf) == 0xe)
-#define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
-				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
-				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
-				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
 /* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
-				 INTEL_DEVID(dev) == 0x0A1E)
-#define IS_SKL_ULT(dev)		(INTEL_DEVID(dev) == 0x1906 || \
-				 INTEL_DEVID(dev) == 0x1913 || \
-				 INTEL_DEVID(dev) == 0x1916 || \
-				 INTEL_DEVID(dev) == 0x1921 || \
-				 INTEL_DEVID(dev) == 0x1926)
-#define IS_SKL_ULX(dev)		(INTEL_DEVID(dev) == 0x190E || \
-				 INTEL_DEVID(dev) == 0x1915 || \
-				 INTEL_DEVID(dev) == 0x191E)
-#define IS_KBL_ULT(dev)		(INTEL_DEVID(dev) == 0x5906 || \
-				 INTEL_DEVID(dev) == 0x5913 || \
-				 INTEL_DEVID(dev) == 0x5916 || \
-				 INTEL_DEVID(dev) == 0x5921 || \
-				 INTEL_DEVID(dev) == 0x5926)
-#define IS_KBL_ULX(dev)		(INTEL_DEVID(dev) == 0x590E || \
-				 INTEL_DEVID(dev) == 0x5915 || \
-				 INTEL_DEVID(dev) == 0x591E)
-#define IS_SKL_GT3(dev)		(IS_SKYLAKE(dev) && \
-				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_SKL_GT4(dev)		(IS_SKYLAKE(dev) && \
-				 (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
+#define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
+				 INTEL_DEVID(dev_priv) == 0x0A1E)
+#define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
+				 INTEL_DEVID(dev_priv) == 0x1913 || \
+				 INTEL_DEVID(dev_priv) == 0x1916 || \
+				 INTEL_DEVID(dev_priv) == 0x1921 || \
+				 INTEL_DEVID(dev_priv) == 0x1926)
+#define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
+				 INTEL_DEVID(dev_priv) == 0x1915 || \
+				 INTEL_DEVID(dev_priv) == 0x191E)
+#define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
+				 INTEL_DEVID(dev_priv) == 0x5913 || \
+				 INTEL_DEVID(dev_priv) == 0x5916 || \
+				 INTEL_DEVID(dev_priv) == 0x5921 || \
+				 INTEL_DEVID(dev_priv) == 0x5926)
+#define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
+				 INTEL_DEVID(dev_priv) == 0x5915 || \
+				 INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
 
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
@@ -2705,7 +2815,8 @@ struct drm_i915_cmd_table {
 #define BXT_REVID_B0		0x3
 #define BXT_REVID_C0		0x9
 
-#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define IS_BXT_REVID(dev_priv, since, until) \
+	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
 
 #define KBL_REVID_A0		0x0
 #define KBL_REVID_B0		0x1
@@ -2713,8 +2824,8 @@ struct drm_i915_cmd_table {
 #define KBL_REVID_D0		0x3
 #define KBL_REVID_E0		0x4
 
-#define IS_KBL_REVID(p, since, until) \
-	(IS_KABYLAKE(p) && IS_REVID(p, since, until))
+#define IS_KBL_REVID(dev_priv, since, until) \
+	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -2722,14 +2833,14 @@ struct drm_i915_cmd_table {
  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
  * chips, etc.).
  */
-#define IS_GEN2(dev)	(!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
-#define IS_GEN3(dev)	(!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
-#define IS_GEN4(dev)	(!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
-#define IS_GEN5(dev)	(!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
-#define IS_GEN6(dev)	(!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
-#define IS_GEN7(dev)	(!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
-#define IS_GEN8(dev)	(!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
-#define IS_GEN9(dev)	(!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+#define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
+#define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
+#define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
+#define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
+#define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
+#define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
+#define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
+#define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
 
 #define ENGINE_MASK(id)	BIT(id)
 #define RENDER_RING	ENGINE_MASK(RCS)
@@ -2750,8 +2861,8 @@ struct drm_i915_cmd_table {
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
 #define HAS_SNOOP(dev)		(INTEL_INFO(dev)->has_snoop)
 #define HAS_EDRAM(dev)		(!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
-#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-				 HAS_EDRAM(dev))
+#define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
+				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
 #define HWS_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->hws_needs_physical)
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->has_hw_contexts)
@@ -2764,7 +2875,7 @@ struct drm_i915_cmd_table {
 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
+#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_845G(dev_priv))
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -2784,28 +2895,31 @@ struct drm_i915_cmd_table {
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
-						      IS_I915GM(dev)))
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+					 !(IS_I915G(dev_priv) || \
+					 IS_I915GM(dev_priv)))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
 
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
-#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
 #define HAS_DP_MST(dev)	(INTEL_INFO(dev)->has_dp_mst)
 
-#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
+#define HAS_DDI(dev_priv)	((dev_priv)->info.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(INTEL_INFO(dev)->has_psr)
-#define HAS_RUNTIME_PM(dev)	(INTEL_INFO(dev)->has_runtime_pm)
 #define HAS_RC6(dev)		(INTEL_INFO(dev)->has_rc6)
 #define HAS_RC6p(dev)		(INTEL_INFO(dev)->has_rc6p)
 
 #define HAS_CSR(dev)	(INTEL_INFO(dev)->has_csr)
 
+#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
@@ -2832,22 +2946,27 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
-#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
-#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
-#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
-#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+	((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+	((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+
+#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
-#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
+#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
+				 2 : HAS_L3_DPF(dev_priv))
 
 #define GT_FREQUENCY_MULTIPLIER 50
 #define GEN9_FREQ_SCALER 3
@@ -2883,11 +3002,17 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 #endif
+extern const struct dev_pm_ops i915_pm_ops;
+
+extern int i915_driver_load(struct pci_dev *pdev,
+			    const struct pci_device_id *ent);
+extern void i915_driver_unload(struct drm_device *dev);
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
 extern void i915_reset(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
+extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -2969,7 +3094,7 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
 
 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->gvt.initialized;
+	return dev_priv->gvt;
 }
 
 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
@@ -3071,7 +3196,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
-void i915_gem_load_init(struct drm_device *dev);
+int i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
 int i915_gem_freeze(struct drm_i915_private *dev_priv);
@@ -3082,7 +3207,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
-						  size_t size);
+						   u64 size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
 		struct drm_device *dev, const void *data, size_t size);
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
@@ -3103,69 +3228,85 @@ void i915_vma_close(struct i915_vma *vma);
 void i915_vma_destroy(struct i915_vma *vma);
 
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 
-int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
-static inline int __sg_page_count(struct scatterlist *sg)
+static inline int __sg_page_count(const struct scatterlist *sg)
 {
 	return sg->length >> PAGE_SHIFT;
 }
 
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+		       unsigned int n, unsigned int *offset);
+
 struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+			 unsigned int n);
 
-static inline dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+			       unsigned int n);
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+				unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				 struct sg_table *pages);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	if (n < obj->get_page.last) {
-		obj->get_page.sg = obj->pages->sgl;
-		obj->get_page.last = 0;
-	}
+	might_lock(&obj->mm.lock);
 
-	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
-		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
-		if (unlikely(sg_is_chain(obj->get_page.sg)))
-			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
-	}
+	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+		return 0;
 
-	return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+	return __i915_gem_object_get_pages(obj);
 }
 
-static inline struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
-		return NULL;
+	GEM_BUG_ON(!obj->mm.pages);
 
-	if (n < obj->get_page.last) {
-		obj->get_page.sg = obj->pages->sgl;
-		obj->get_page.last = 0;
-	}
-
-	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
-		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
-		if (unlikely(sg_is_chain(obj->get_page.sg)))
-			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
-	}
-
-	return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
+	atomic_inc(&obj->mm.pages_pin_count);
 }
 
-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 {
-	BUG_ON(obj->pages == NULL);
-	obj->pages_pin_count++;
+	return atomic_read(&obj->mm.pages_pin_count);
 }
 
-static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
-	BUG_ON(obj->pages_pin_count == 0);
-	obj->pages_pin_count--;
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	GEM_BUG_ON(!obj->mm.pages);
+
+	atomic_dec(&obj->mm.pages_pin_count);
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
 }
 
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	__i915_gem_object_unpin_pages(obj);
+}
+
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+	I915_MM_NORMAL = 0,
+	I915_MM_SHRINKER
+};
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
+
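
The obj->mm.lock subclasses above exist purely so lockdep can tell the shrinker's nested acquisition apart from the normal one. A minimal sketch of how such a subclass is consumed (hypothetical helper, assuming obj->mm.lock is a struct mutex):

static void put_pages_sketch(struct drm_i915_gem_object *obj,
			     enum i915_mm_subclass subclass)
{
	/* Tell lockdep which nesting level this acquisition belongs to, so
	 * reclaim under I915_MM_SHRINKER is not reported as recursion
	 * against I915_MM_NORMAL users of the same lock.
	 */
	mutex_lock_nested(&obj->mm.lock, subclass);
	/* ... detach and release the backing pages ... */
	mutex_unlock(&obj->mm.lock);
}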
 enum i915_map_type {
 	I915_MAP_WB = 0,
 	I915_MAP_WC,
@@ -3181,8 +3322,8 @@ enum i915_map_type {
  * the kernel address space. Based on the @type of mapping, the PTE will be
  * set to either WriteBack or WriteCombine (via pgprot_t).
  *
- * The caller must hold the struct_mutex, and is responsible for calling
- * i915_gem_object_unpin_map() when the mapping is no longer required.
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
  *
  * Returns the pointer through which to access the mapped object, or an
  * ERR_PTR() on error.
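
As a usage sketch of the contract above (hypothetical helper; only i915_gem_object_pin_map() and i915_gem_object_unpin_map() are taken from this header):

static int copy_into_object_sketch(struct drm_i915_gem_object *obj,
				   const void *src, size_t len)
{
	void *vaddr;

	/* Pin the backing pages and map them WriteBack into kernel space. */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);

	/* Drop the pin; once the pin count hits zero the mapping may go. */
	i915_gem_object_unpin_map(obj);
	return 0;
}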
@@ -3198,12 +3339,9 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
  * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
  * removed.
- *
- * The caller must hold the struct_mutex.
  */
 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 {
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
 	i915_gem_object_unpin_pages(obj);
 }
 
@@ -3236,7 +3374,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits);
 
-int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
@@ -3265,7 +3403,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 
 void i915_gem_reset(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
@@ -3275,9 +3413,10 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int __must_check
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool readonly);
+int i915_gem_object_wait(struct drm_i915_gem_object *obj,
+			 unsigned int flags,
+			 long timeout,
+			 struct intel_rps_client *rps);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
@@ -3359,6 +3498,7 @@ int __must_check i915_vma_put_fence(struct i915_vma *vma);
 static inline bool
 i915_vma_pin_fence(struct i915_vma *vma)
 {
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 	if (vma->fence) {
 		vma->fence->pin_count++;
 		return true;
@@ -3377,6 +3517,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 	if (vma->fence) {
 		GEM_BUG_ON(vma->fence->pin_count <= 0);
 		vma->fence->pin_count--;
@@ -3386,8 +3527,10 @@ i915_vma_unpin_fence(struct i915_vma *vma)
 void i915_gem_restore_fences(struct drm_device *dev);
 
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+				       struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+					 struct sg_table *pages);
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3397,6 +3540,9 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+			    unsigned int flags);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3430,6 +3576,16 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
 	kref_put(&ctx->ref, i915_gem_context_free);
 }
 
+static inline struct intel_timeline *
+i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
+				 struct intel_engine_cs *engine)
+{
+	struct i915_address_space *vm;
+
+	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+	return &vm->timeline.engine[engine->id];
+}
+
 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
 {
 	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
@@ -3483,6 +3639,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 					       u32 gtt_offset,
 					       u32 size);
 
+/* i915_gem_internal.c */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
+				unsigned int size);
+
 /* i915_gem_shrinker.c */
 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 			      unsigned long target,
@@ -3521,6 +3682,8 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
 #endif
 
 /* i915_gpu_error.c */
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
 __printf(2, 3)
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
@@ -3541,7 +3704,20 @@ void i915_error_state_get(struct drm_device *dev,
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
+#else
+
+static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+					    u32 engine_mask,
+					    const char *error_msg)
+{
+}
+
+static inline void i915_destroy_error_state(struct drm_device *dev)
+{
+}
+
+#endif
+
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
@@ -3591,6 +3767,9 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
 				     enum port port);
+bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+				  enum port port);
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
@@ -3647,7 +3826,7 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv);
 
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_init(struct drm_device *dev);
+extern int intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_connector_register(struct drm_connector *);
@@ -3702,6 +3881,23 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
 /* intel_dpio_phy.c */
+void bxt_port_to_phy_channel(enum port port,
+			     enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+				  enum port port, u32 margin, u32 scale,
+				  u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+			    enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+			      enum dpio_phy phy);
+uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+					     uint8_t lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+				     uint8_t lane_lat_optim_mask);
+uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
 void chv_set_phy_signal_level(struct intel_encoder *encoder,
 			      u32 deemph_reg_value, u32 margin_reg_value,
 			      bool uniq_trans_scale);
@@ -3791,11 +3987,30 @@ __raw_write(64, q)
 #undef __raw_write
 
 /* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections inside IRQ handlers where forcewake is explicitly
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
  * controlled.
+ *
  * Think twice, and think again, before using these.
- * Note: Should only be used between intel_uncore_forcewake_irqlock() and
- * intel_uncore_forcewake_irqunlock().
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&dev_priv->uncore.lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&dev_priv->uncore.lock);
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
  */
 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
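
Putting the comment above into one place, a minimal sketch of the intended pattern (hypothetical helper; the __locked forcewake calls and I915_READ_FW() are the interfaces named above):

static u32 read_reg_fw_sketch(struct drm_i915_private *dev_priv,
			      i915_reg_t reg)
{
	u32 val;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

	val = I915_READ_FW(reg); /* untraced; forcewake already held */

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return val;
}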
@@ -3807,11 +4022,11 @@ __raw_write(64, q)
 #define INTEL_BROADCAST_RGB_FULL 1
 #define INTEL_BROADCAST_RGB_LIMITED 2
 
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
+static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
 {
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return VLV_VGACNTRL;
-	else if (INTEL_INFO(dev)->gen >= 5)
+	else if (INTEL_GEN(dev_priv) >= 5)
 		return CPU_VGACNTRL;
 	else
 		return VGACNTRL;
@@ -3872,7 +4087,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
 	/* Before we do the heavier coherent read of the seqno,
 	 * check the value (hopefully) in the CPU cacheline.
 	 */
-	if (i915_gem_request_completed(req))
+	if (__i915_gem_request_completed(req))
 		return true;
 
 	/* Ensure our read of the seqno is coherent so that we
@@ -3923,7 +4138,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
 			wake_up_process(tsk);
 		rcu_read_unlock();
 
-		if (i915_gem_request_completed(req))
+		if (__i915_gem_request_completed(req))
 			return true;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 947e82c..d2ad73d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,6 @@
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
@@ -42,6 +41,7 @@
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 
@@ -63,13 +63,13 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 }
 
 static int
-insert_mappable_node(struct drm_i915_private *i915,
+insert_mappable_node(struct i915_ggtt *ggtt,
                      struct drm_mm_node *node, u32 size)
 {
 	memset(node, 0, sizeof(*node));
-	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
-						   size, 0, 0, 0,
-						   i915->ggtt.mappable_end,
+	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
+						   size, 0, -1,
+						   0, ggtt->mappable_end,
 						   DRM_MM_SEARCH_DEFAULT,
 						   DRM_MM_CREATE_DEFAULT);
 }
@@ -82,7 +82,7 @@ remove_mappable_node(struct drm_mm_node *node)
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
-				  size_t size)
+				  u64 size)
 {
 	spin_lock(&dev_priv->mm.object_stat_lock);
 	dev_priv->mm.object_count++;
@@ -91,7 +91,7 @@ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 }
 
 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
-				     size_t size)
+				     u64 size)
 {
 	spin_lock(&dev_priv->mm.object_stat_lock);
 	dev_priv->mm.object_count--;
@@ -104,6 +104,8 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
 	int ret;
 
+	might_sleep();
+
 	if (!i915_reset_in_progress(error))
 		return 0;
 
@@ -114,7 +116,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 	 */
 	ret = wait_event_interruptible_timeout(error->reset_queue,
 					       !i915_reset_in_progress(error),
-					       10*HZ);
+					       I915_RESET_TIMEOUT);
 	if (ret == 0) {
 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
 		return -EIO;
@@ -167,7 +169,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static int
+static struct sg_table *
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
 	struct address_space *mapping = obj->base.filp->f_mapping;
@@ -177,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	int i;
 
 	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 		struct page *page;
@@ -185,7 +187,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
 		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
-			return PTR_ERR(page);
+			return ERR_CAST(page);
 
 		src = kmap_atomic(page);
 		memcpy(vaddr, src, PAGE_SIZE);
@@ -200,11 +202,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
 		kfree(st);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	sg = st->sgl;
@@ -214,29 +216,31 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	sg_dma_address(sg) = obj->phys_handle->busaddr;
 	sg_dma_len(sg) = obj->base.size;
 
-	obj->pages = st;
-	return 0;
+	return st;
 }
 
 static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
 {
-	int ret;
+	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
-	BUG_ON(obj->madv == __I915_MADV_PURGED);
+	if (obj->mm.madv == I915_MADV_DONTNEED)
+		obj->mm.dirty = false;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, true);
-	if (WARN_ON(ret)) {
-		/* In the event of a disaster, abandon all caches and
-		 * hope for the best.
-		 */
-		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		i915_gem_clflush_object(obj, false);
 
-	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+}
 
-	if (obj->dirty) {
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+			       struct sg_table *pages)
+{
+	__i915_gem_object_release_shmem(obj);
+
+	if (obj->mm.dirty) {
 		struct address_space *mapping = obj->base.filp->f_mapping;
 		char *vaddr = obj->phys_handle->vaddr;
 		int i;
@@ -255,22 +259,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 			kunmap_atomic(dst);
 
 			set_page_dirty(page);
-			if (obj->madv == I915_MADV_WILLNEED)
+			if (obj->mm.madv == I915_MADV_WILLNEED)
 				mark_page_accessed(page);
 			put_page(page);
 			vaddr += PAGE_SIZE;
 		}
-		obj->dirty = 0;
+		obj->mm.dirty = false;
 	}
 
-	sg_free_table(obj->pages);
-	kfree(obj->pages);
+	sg_free_table(pages);
+	kfree(pages);
 }
 
 static void
 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
 {
 	drm_pci_free(obj->base.dev, obj->phys_handle);
+	i915_gem_object_unpin_pages(obj);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
@@ -292,7 +297,12 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	 * must wait for all rendering to complete to the object (as unbinding
 	 * must anyway), and retire the requests.
 	 */
-	ret = i915_gem_object_wait_rendering(obj, false);
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_LOCKED |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT,
+				   NULL);
 	if (ret)
 		return ret;
 
@@ -311,88 +321,143 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for just read access or read-write access
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool readonly)
+static long
+i915_gem_object_wait_fence(struct dma_fence *fence,
+			   unsigned int flags,
+			   long timeout,
+			   struct intel_rps_client *rps)
 {
-	struct reservation_object *resv;
-	struct i915_gem_active *active;
-	unsigned long active_mask;
-	int idx;
+	struct drm_i915_gem_request *rq;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
 
-	if (!readonly) {
-		active = obj->last_read;
-		active_mask = i915_gem_object_get_active(obj);
-	} else {
-		active_mask = 1;
-		active = &obj->last_write;
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return timeout;
+
+	if (!dma_fence_is_i915(fence))
+		return dma_fence_wait_timeout(fence,
+					      flags & I915_WAIT_INTERRUPTIBLE,
+					      timeout);
+
+	rq = to_request(fence);
+	if (i915_gem_request_completed(rq))
+		goto out;
+
+	/* This client is about to stall waiting for the GPU. In many cases
+	 * this is undesirable and limits the throughput of the system, as
+	 * many clients cannot continue processing user input/output whilst
+	 * blocked. RPS autotuning may take tens of milliseconds to respond
+	 * to the GPU load and thus incurs additional latency for the client.
+	 * We can circumvent that by promoting the GPU frequency to maximum
+	 * before we wait. This makes the GPU throttle up much more quickly
+	 * (good for benchmarks and user experience, e.g. window animations),
+	 * but at a cost of spending more power processing the workload
+	 * (bad for battery). Not all clients even want their results
+	 * immediately and for them we should just let the GPU select its own
+	 * frequency to maximise efficiency. To prevent a single client from
+	 * forcing the clocks too high for the whole system, we only allow
+	 * each client to waitboost once in a busy period.
+	 */
+	if (rps) {
+		if (INTEL_GEN(rq->i915) >= 6)
+			gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+		else
+			rps = NULL;
 	}
 
-	for_each_active(active_mask, idx) {
-		int ret;
+	timeout = i915_wait_request(rq, flags, timeout);
 
-		ret = i915_gem_active_wait(&active[idx],
-					   &obj->base.dev->struct_mutex);
-		if (ret)
-			return ret;
+out:
+	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
+		i915_gem_request_retire_upto(rq);
+
+	if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
+		/* The GPU is now idle and this client has stalled.
+		 * Since no other client has submitted a request in the
+		 * meantime, assume that this client is the only one
+		 * supplying work to the GPU but is unable to keep that
+		 * work supplied because it is waiting. Since the GPU is
+		 * then never kept fully busy, RPS autoclocking will
+		 * keep the clocks relatively low, causing further delays.
+		 * Compensate by giving the synchronous client credit for
+		 * a waitboost next time.
+		 */
+		spin_lock(&rq->i915->rps.client_lock);
+		list_del_init(&rps->link);
+		spin_unlock(&rq->i915->rps.client_lock);
 	}
 
-	resv = i915_gem_object_get_dmabuf_resv(obj);
-	if (resv) {
-		long err;
-
-		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
-							  MAX_SCHEDULE_TIMEOUT);
-		if (err < 0)
-			return err;
-	}
-
-	return 0;
+	return timeout;
 }
 
-/* A nonblocking variant of the above wait. Must be called prior to
- * acquiring the mutex for the object, as the object state may change
- * during this call. A reference must be held by the caller for the object.
- */
-static __must_check int
-__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
-			struct intel_rps_client *rps,
-			bool readonly)
+static long
+i915_gem_object_wait_reservation(struct reservation_object *resv,
+				 unsigned int flags,
+				 long timeout,
+				 struct intel_rps_client *rps)
 {
-	struct i915_gem_active *active;
-	unsigned long active_mask;
-	int idx;
+	struct dma_fence *excl;
 
-	active_mask = __I915_BO_ACTIVE(obj);
-	if (!active_mask)
-		return 0;
-
-	if (!readonly) {
-		active = obj->last_read;
-	} else {
-		active_mask = 1;
-		active = &obj->last_write;
-	}
-
-	for_each_active(active_mask, idx) {
+	if (flags & I915_WAIT_ALL) {
+		struct dma_fence **shared;
+		unsigned int count, i;
 		int ret;
 
-		ret = i915_gem_active_wait_unlocked(&active[idx],
-						    I915_WAIT_INTERRUPTIBLE,
-						    NULL, rps);
+		ret = reservation_object_get_fences_rcu(resv,
+							&excl, &count, &shared);
 		if (ret)
 			return ret;
+
+		for (i = 0; i < count; i++) {
+			timeout = i915_gem_object_wait_fence(shared[i],
+							     flags, timeout,
+							     rps);
+			if (timeout <= 0)
+				break;
+
+			dma_fence_put(shared[i]);
+		}
+
+		for (; i < count; i++)
+			dma_fence_put(shared[i]);
+		kfree(shared);
+	} else {
+		excl = reservation_object_get_excl_rcu(resv);
 	}
 
-	return 0;
+	if (excl && timeout > 0)
+		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+
+	dma_fence_put(excl);
+
+	return timeout;
+}
+
+/**
+ * Waits for rendering to the object to complete
+ * @obj: i915 gem object
+ * @flags: how to wait (under a lock, for all rendering or just for writes, etc.)
+ * @timeout: how long to wait
+ * @rps: client (user process) to charge for any waitboosting
+ */
+int
+i915_gem_object_wait(struct drm_i915_gem_object *obj,
+		     unsigned int flags,
+		     long timeout,
+		     struct intel_rps_client *rps)
+{
+	might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+	GEM_BUG_ON(debug_locks &&
+		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
+		   !!(flags & I915_WAIT_LOCKED));
+#endif
+	GEM_BUG_ON(timeout < 0);
+
+	timeout = i915_gem_object_wait_reservation(obj->resv,
+						   flags, timeout,
+						   rps);
+	return timeout < 0 ? timeout : 0;
 }
 
 static struct intel_rps_client *to_rps_client(struct drm_file *file)
@@ -416,7 +481,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 		return 0;
 	}
 
-	if (obj->madv != I915_MADV_WILLNEED)
+	if (obj->mm.madv != I915_MADV_WILLNEED)
 		return -EFAULT;
 
 	if (obj->base.filp == NULL)
@@ -426,9 +491,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_put_pages(obj);
-	if (ret)
-		return ret;
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+	if (obj->mm.pages)
+		return -EBUSY;
 
 	/* create a new object */
 	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
@@ -438,23 +503,29 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 	obj->phys_handle = phys;
 	obj->ops = &i915_gem_phys_ops;
 
-	return i915_gem_object_get_pages(obj);
+	return i915_gem_object_pin_pages(obj);
 }
 
 static int
 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
+		     struct drm_file *file)
 {
 	struct drm_device *dev = obj->base.dev;
 	void *vaddr = obj->phys_handle->vaddr + args->offset;
 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
-	int ret = 0;
+	int ret;
 
 	/* We manually control the domain here and pretend that it
 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
 	 */
-	ret = i915_gem_object_wait_rendering(obj, false);
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_LOCKED |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT,
+				   to_rps_client(file));
 	if (ret)
 		return ret;
 
@@ -516,7 +587,7 @@ i915_gem_create(struct drm_file *file,
 
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	/* drop reference from allocate - handle holds it now */
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	if (ret)
 		return ret;
 
@@ -548,6 +619,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_create *args = data;
 
+	i915_gem_flush_free_objects(to_i915(dev));
+
 	return i915_gem_create(file, dev,
 			       args->size, &args->handle);
 }
@@ -614,21 +687,24 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 {
 	int ret;
 
-	*needs_clflush = 0;
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
+	*needs_clflush = 0;
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
-	ret = i915_gem_object_wait_rendering(obj, true);
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_LOCKED,
+				   MAX_SCHEDULE_TIMEOUT,
+				   NULL);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're not in the cpu read domain, set ourself into the gtt
@@ -661,20 +737,25 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 {
 	int ret;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	*needs_clflush = 0;
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
-	ret = i915_gem_object_wait_rendering(obj, false);
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_LOCKED |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT,
+				   NULL);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're not in the cpu write domain, set ourself into the
@@ -704,7 +785,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		obj->cache_dirty = true;
 
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = 1;
+	obj->mm.dirty = true;
 	/* return with the pages pinned */
 	return 0;
 
@@ -713,32 +794,6 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	return ret;
 }
 
-/* Per-page copy function for the shmem pread fastpath.
- * Flushes invalid cachelines before reading the target if
- * needs_clflush is set. */
-static int
-shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
-		 char __user *user_data,
-		 bool page_do_bit17_swizzling, bool needs_clflush)
-{
-	char *vaddr;
-	int ret;
-
-	if (unlikely(page_do_bit17_swizzling))
-		return -EINVAL;
-
-	vaddr = kmap_atomic(page);
-	if (needs_clflush)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
-	ret = __copy_to_user_inatomic(user_data,
-				      vaddr + shmem_page_offset,
-				      page_length);
-	kunmap_atomic(vaddr);
-
-	return ret ? -EFAULT : 0;
-}
-
 static void
 shmem_clflush_swizzled_range(char *addr, unsigned long length,
 			     bool swizzled)
@@ -764,7 +819,7 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
 /* The only difference from the fast-path function is that this one can
  * handle bit17 swizzling and uses non-atomic copy and kmap functions. */
 static int
-shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_slow(struct page *page, int offset, int length,
 		 char __user *user_data,
 		 bool page_do_bit17_swizzling, bool needs_clflush)
 {
@@ -773,60 +828,130 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 
 	vaddr = kmap(page);
 	if (needs_clflush)
-		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
-					     page_length,
+		shmem_clflush_swizzled_range(vaddr + offset, length,
 					     page_do_bit17_swizzling);
 
 	if (page_do_bit17_swizzling)
-		ret = __copy_to_user_swizzled(user_data,
-					      vaddr, shmem_page_offset,
-					      page_length);
+		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
 	else
-		ret = __copy_to_user(user_data,
-				     vaddr + shmem_page_offset,
-				     page_length);
+		ret = __copy_to_user(user_data, vaddr + offset, length);
 	kunmap(page);
 
 	return ret ? -EFAULT : 0;
 }
 
-static inline unsigned long
-slow_user_access(struct io_mapping *mapping,
-		 uint64_t page_base, int page_offset,
-		 char __user *user_data,
-		 unsigned long length, bool pwrite)
+static int
+shmem_pread(struct page *page, int offset, int length, char __user *user_data,
+	    bool page_do_bit17_swizzling, bool needs_clflush)
 {
-	void __iomem *ioaddr;
+	int ret;
+
+	ret = -ENODEV;
+	if (!page_do_bit17_swizzling) {
+		char *vaddr = kmap_atomic(page);
+
+		if (needs_clflush)
+			drm_clflush_virt_range(vaddr + offset, length);
+		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+		kunmap_atomic(vaddr);
+	}
+	if (ret == 0)
+		return 0;
+
+	return shmem_pread_slow(page, offset, length, user_data,
+				page_do_bit17_swizzling, needs_clflush);
+}
+
+static int
+i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pread *args)
+{
+	char __user *user_data;
+	u64 remain;
+	unsigned int obj_do_bit17_swizzling;
+	unsigned int needs_clflush;
+	unsigned int idx, offset;
+	int ret;
+
+	obj_do_bit17_swizzling = 0;
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		obj_do_bit17_swizzling = BIT(17);
+
+	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+	mutex_unlock(&obj->base.dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	remain = args->size;
+	user_data = u64_to_user_ptr(args->data_ptr);
+	offset = offset_in_page(args->offset);
+	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+		struct page *page = i915_gem_object_get_page(obj, idx);
+		int length;
+
+		length = remain;
+		if (offset + length > PAGE_SIZE)
+			length = PAGE_SIZE - offset;
+
+		ret = shmem_pread(page, offset, length, user_data,
+				  page_to_phys(page) & obj_do_bit17_swizzling,
+				  needs_clflush);
+		if (ret)
+			break;
+
+		remain -= length;
+		user_data += length;
+		offset = 0;
+	}
+
+	i915_gem_obj_finish_shmem_access(obj);
+	return ret;
+}
+
+static inline bool
+gtt_user_read(struct io_mapping *mapping,
+	      loff_t base, int offset,
+	      char __user *user_data, int length)
+{
 	void *vaddr;
-	uint64_t unwritten;
+	unsigned long unwritten;
 
-	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
 	/* We can use the cpu mem copy function because this is X86. */
-	vaddr = (void __force *)ioaddr + page_offset;
-	if (pwrite)
-		unwritten = __copy_from_user(vaddr, user_data, length);
-	else
-		unwritten = __copy_to_user(user_data, vaddr, length);
-
-	io_mapping_unmap(ioaddr);
+	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+	unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+	io_mapping_unmap_atomic(vaddr);
+	if (unwritten) {
+		vaddr = (void __force *)
+			io_mapping_map_wc(mapping, base, PAGE_SIZE);
+		unwritten = copy_to_user(user_data, vaddr + offset, length);
+		io_mapping_unmap(vaddr);
+	}
 	return unwritten;
 }
 
 static int
-i915_gem_gtt_pread(struct drm_device *dev,
-		   struct drm_i915_gem_object *obj, uint64_t size,
-		   uint64_t data_offset, uint64_t data_ptr)
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+		   const struct drm_i915_gem_pread *args)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct i915_vma *vma;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct drm_mm_node node;
-	char __user *user_data;
-	uint64_t remain;
-	uint64_t offset;
+	struct i915_vma *vma;
+	void __user *user_data;
+	u64 remain, offset;
 	int ret;
 
-	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+	if (ret)
+		return ret;
+
+	intel_runtime_pm_get(i915);
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+				       PIN_MAPPABLE | PIN_NONBLOCK);
 	if (!IS_ERR(vma)) {
 		node.start = i915_ggtt_offset(vma);
 		node.allocated = false;
@@ -837,35 +962,21 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		}
 	}
 	if (IS_ERR(vma)) {
-		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
-			goto out;
-
-		ret = i915_gem_object_get_pages(obj);
-		if (ret) {
-			remove_mappable_node(&node);
-			goto out;
-		}
-
-		i915_gem_object_pin_pages(obj);
+			goto out_unlock;
+		GEM_BUG_ON(!node.allocated);
 	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
 	if (ret)
 		goto out_unpin;
 
-	user_data = u64_to_user_ptr(data_ptr);
-	remain = size;
-	offset = data_offset;
+	mutex_unlock(&i915->drm.struct_mutex);
 
-	mutex_unlock(&dev->struct_mutex);
-	if (likely(!i915.prefault_disable)) {
-		ret = fault_in_pages_writeable(user_data, remain);
-		if (ret) {
-			mutex_lock(&dev->struct_mutex);
-			goto out_unpin;
-		}
-	}
+	user_data = u64_to_user_ptr(args->data_ptr);
+	remain = args->size;
+	offset = args->offset;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -882,19 +993,14 @@ i915_gem_gtt_pread(struct drm_device *dev,
 			wmb();
 			ggtt->base.insert_page(&ggtt->base,
 					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
-					       node.start,
-					       I915_CACHE_NONE, 0);
+					       node.start, I915_CACHE_NONE, 0);
 			wmb();
 		} else {
 			page_base += offset & PAGE_MASK;
 		}
-		/* This is a slow read/write as it tries to read from
-		 * and write to user memory which may result into page
-		 * faults, and so we cannot perform this under struct_mutex.
-		 */
-		if (slow_user_access(&ggtt->mappable, page_base,
-				     page_offset, user_data,
-				     page_length, false)) {
+
+		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+				  user_data, page_length)) {
 			ret = -EFAULT;
 			break;
 		}
@@ -904,111 +1010,19 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		offset += page_length;
 	}
 
-	mutex_lock(&dev->struct_mutex);
-	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
-		/* The user has modified the object whilst we tried
-		 * reading from it, and we now have no idea what domain
-		 * the pages should be in. As we have just been touching
-		 * them directly, flush everything back to the GTT
-		 * domain.
-		 */
-		ret = i915_gem_object_set_to_gtt_domain(obj, false);
-	}
-
+	mutex_lock(&i915->drm.struct_mutex);
 out_unpin:
 	if (node.allocated) {
 		wmb();
 		ggtt->base.clear_range(&ggtt->base,
-				       node.start, node.size,
-				       true);
-		i915_gem_object_unpin_pages(obj);
+				       node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
 		i915_vma_unpin(vma);
 	}
-out:
-	return ret;
-}
-
-static int
-i915_gem_shmem_pread(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pread *args,
-		     struct drm_file *file)
-{
-	char __user *user_data;
-	ssize_t remain;
-	loff_t offset;
-	int shmem_page_offset, page_length, ret = 0;
-	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-	int prefaulted = 0;
-	int needs_clflush = 0;
-	struct sg_page_iter sg_iter;
-
-	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
-	if (ret)
-		return ret;
-
-	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-	user_data = u64_to_user_ptr(args->data_ptr);
-	offset = args->offset;
-	remain = args->size;
-
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
-			 offset >> PAGE_SHIFT) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-
-		if (remain <= 0)
-			break;
-
-		/* Operation in this page
-		 *
-		 * shmem_page_offset = offset within page in shmem file
-		 * page_length = bytes to copy for this page
-		 */
-		shmem_page_offset = offset_in_page(offset);
-		page_length = remain;
-		if ((shmem_page_offset + page_length) > PAGE_SIZE)
-			page_length = PAGE_SIZE - shmem_page_offset;
-
-		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
-			(page_to_phys(page) & (1 << 17)) != 0;
-
-		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
-				       user_data, page_do_bit17_swizzling,
-				       needs_clflush);
-		if (ret == 0)
-			goto next_page;
-
-		mutex_unlock(&dev->struct_mutex);
-
-		if (likely(!i915.prefault_disable) && !prefaulted) {
-			ret = fault_in_pages_writeable(user_data, remain);
-			/* Userspace is tricking us, but we've already clobbered
-			 * its pages with the prefault and promised to write the
-			 * data up to the first fault. Hence ignore any errors
-			 * and just continue. */
-			(void)ret;
-			prefaulted = 1;
-		}
-
-		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
-				       user_data, page_do_bit17_swizzling,
-				       needs_clflush);
-
-		mutex_lock(&dev->struct_mutex);
-
-		if (ret)
-			goto out;
-
-next_page:
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
-	}
-
-out:
-	i915_gem_obj_finish_shmem_access(obj);
+out_unlock:
+	intel_runtime_pm_put(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 	return ret;
 }
@@ -1027,7 +1041,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_pread *args = data;
 	struct drm_i915_gem_object *obj;
-	int ret = 0;
+	int ret;
 
 	if (args->size == 0)
 		return 0;
@@ -1045,36 +1059,29 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	if (args->offset > obj->base.size ||
 	    args->size > obj->base.size - args->offset) {
 		ret = -EINVAL;
-		goto err;
+		goto out;
 	}
 
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
-	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT,
+				   to_rps_client(file));
 	if (ret)
-		goto err;
+		goto out;
 
-	ret = i915_mutex_lock_interruptible(dev);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err;
+		goto out;
 
-	ret = i915_gem_shmem_pread(dev, obj, args, file);
+	ret = i915_gem_shmem_pread(obj, args);
+	if (ret == -EFAULT || ret == -ENODEV)
+		ret = i915_gem_gtt_pread(obj, args);
 
-	/* pread for non shmem backed objects */
-	if (ret == -EFAULT || ret == -ENODEV) {
-		intel_runtime_pm_get(to_i915(dev));
-		ret = i915_gem_gtt_pread(dev, obj, args->size,
-					args->offset, args->data_ptr);
-		intel_runtime_pm_put(to_i915(dev));
-	}
-
+	i915_gem_object_unpin_pages(obj);
+out:
 	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-
-err:
-	i915_gem_object_put_unlocked(obj);
 	return ret;
 }
 
@@ -1082,51 +1089,52 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
  * page faults in the source data
  */
 
-static inline int
-fast_user_write(struct io_mapping *mapping,
-		loff_t page_base, int page_offset,
-		char __user *user_data,
-		int length)
+static inline bool
+ggtt_write(struct io_mapping *mapping,
+	   loff_t base, int offset,
+	   char __user *user_data, int length)
 {
-	void __iomem *vaddr_atomic;
 	void *vaddr;
 	unsigned long unwritten;
 
-	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
 	/* We can use the cpu mem copy function because this is X86. */
-	vaddr = (void __force*)vaddr_atomic + page_offset;
-	unwritten = __copy_from_user_inatomic_nocache(vaddr,
+	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+	unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
 						      user_data, length);
-	io_mapping_unmap_atomic(vaddr_atomic);
+	io_mapping_unmap_atomic(vaddr);
+	if (unwritten) {
+		vaddr = (void __force *)
+			io_mapping_map_wc(mapping, base, PAGE_SIZE);
+		unwritten = copy_from_user(vaddr + offset, user_data, length);
+		io_mapping_unmap(vaddr);
+	}
+
 	return unwritten;
 }
 
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
- * @i915: i915 device private data
- * @obj: i915 gem object
+ * @obj: i915 GEM object
  * @args: pwrite arguments structure
- * @file: drm file pointer
  */
 static int
-i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
-			 struct drm_i915_gem_object *obj,
-			 struct drm_i915_gem_pwrite *args,
-			 struct drm_file *file)
+i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
+			 const struct drm_i915_gem_pwrite *args)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = &i915->ggtt;
-	struct drm_device *dev = obj->base.dev;
-	struct i915_vma *vma;
 	struct drm_mm_node node;
-	uint64_t remain, offset;
-	char __user *user_data;
+	struct i915_vma *vma;
+	u64 remain, offset;
+	void __user *user_data;
 	int ret;
-	bool hit_slow_path = false;
 
-	if (i915_gem_object_is_tiled(obj))
-		return -EFAULT;
+	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+	if (ret)
+		return ret;
 
+	intel_runtime_pm_get(i915);
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE | PIN_NONBLOCK);
 	if (!IS_ERR(vma)) {
@@ -1139,25 +1147,19 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		}
 	}
 	if (IS_ERR(vma)) {
-		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
-			goto out;
-
-		ret = i915_gem_object_get_pages(obj);
-		if (ret) {
-			remove_mappable_node(&node);
-			goto out;
-		}
-
-		i915_gem_object_pin_pages(obj);
+			goto out_unlock;
+		GEM_BUG_ON(!node.allocated);
 	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto out_unpin;
 
+	mutex_unlock(&i915->drm.struct_mutex);
+
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = true;
 
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;
@@ -1170,8 +1172,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		 * page_length = bytes to copy for this page
 		 */
 		u32 page_base = node.start;
-		unsigned page_offset = offset_in_page(offset);
-		unsigned page_length = PAGE_SIZE - page_offset;
+		unsigned int page_offset = offset_in_page(offset);
+		unsigned int page_length = PAGE_SIZE - page_offset;
 		page_length = remain < page_length ? remain : page_length;
 		if (node.allocated) {
 			wmb(); /* flush the write before we modify the GGTT */
@@ -1188,92 +1190,36 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		 * If the object is non-shmem backed, we retry with the
 		 * path that handles page faults.
 		 */
-		if (fast_user_write(&ggtt->mappable, page_base,
-				    page_offset, user_data, page_length)) {
-			hit_slow_path = true;
-			mutex_unlock(&dev->struct_mutex);
-			if (slow_user_access(&ggtt->mappable,
-					     page_base,
-					     page_offset, user_data,
-					     page_length, true)) {
-				ret = -EFAULT;
-				mutex_lock(&dev->struct_mutex);
-				goto out_flush;
-			}
-
-			mutex_lock(&dev->struct_mutex);
+		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+			       user_data, page_length)) {
+			ret = -EFAULT;
+			break;
 		}
 
 		remain -= page_length;
 		user_data += page_length;
 		offset += page_length;
 	}
-
-out_flush:
-	if (hit_slow_path) {
-		if (ret == 0 &&
-		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
-			/* The user has modified the object whilst we tried
-			 * reading from it, and we now have no idea what domain
-			 * the pages should be in. As we have just been touching
-			 * them directly, flush everything back to the GTT
-			 * domain.
-			 */
-			ret = i915_gem_object_set_to_gtt_domain(obj, false);
-		}
-	}
-
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+
+	mutex_lock(&i915->drm.struct_mutex);
 out_unpin:
 	if (node.allocated) {
 		wmb();
 		ggtt->base.clear_range(&ggtt->base,
-				       node.start, node.size,
-				       true);
-		i915_gem_object_unpin_pages(obj);
+				       node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
 		i915_vma_unpin(vma);
 	}
-out:
+out_unlock:
+	intel_runtime_pm_put(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
 	return ret;
 }
 
-/* Per-page copy function for the shmem pwrite fastpath.
- * Flushes invalid cachelines before writing to the target if
- * needs_clflush_before is set and flushes out any written cachelines after
- * writing if needs_clflush is set. */
 static int
-shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
-		  char __user *user_data,
-		  bool page_do_bit17_swizzling,
-		  bool needs_clflush_before,
-		  bool needs_clflush_after)
-{
-	char *vaddr;
-	int ret;
-
-	if (unlikely(page_do_bit17_swizzling))
-		return -EINVAL;
-
-	vaddr = kmap_atomic(page);
-	if (needs_clflush_before)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
-	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
-					user_data, page_length);
-	if (needs_clflush_after)
-		drm_clflush_virt_range(vaddr + shmem_page_offset,
-				       page_length);
-	kunmap_atomic(vaddr);
-
-	return ret ? -EFAULT : 0;
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
-static int
-shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_slow(struct page *page, int offset, int length,
 		  char __user *user_data,
 		  bool page_do_bit17_swizzling,
 		  bool needs_clflush_before,
@@ -1284,124 +1230,114 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
 
 	vaddr = kmap(page);
 	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
-		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
-					     page_length,
+		shmem_clflush_swizzled_range(vaddr + offset, length,
 					     page_do_bit17_swizzling);
 	if (page_do_bit17_swizzling)
-		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
-						user_data,
-						page_length);
+		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
+						length);
 	else
-		ret = __copy_from_user(vaddr + shmem_page_offset,
-				       user_data,
-				       page_length);
+		ret = __copy_from_user(vaddr + offset, user_data, length);
 	if (needs_clflush_after)
-		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
-					     page_length,
+		shmem_clflush_swizzled_range(vaddr + offset, length,
 					     page_do_bit17_swizzling);
 	kunmap(page);
 
 	return ret ? -EFAULT : 0;
 }
 
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush_after is set.
+ */
 static int
-i915_gem_shmem_pwrite(struct drm_device *dev,
-		      struct drm_i915_gem_object *obj,
-		      struct drm_i915_gem_pwrite *args,
-		      struct drm_file *file)
+shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
+	     bool page_do_bit17_swizzling,
+	     bool needs_clflush_before,
+	     bool needs_clflush_after)
 {
-	ssize_t remain;
-	loff_t offset;
-	char __user *user_data;
-	int shmem_page_offset, page_length, ret = 0;
-	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-	int hit_slowpath = 0;
-	unsigned int needs_clflush;
-	struct sg_page_iter sg_iter;
+	int ret;
 
-	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+	ret = -ENODEV;
+	if (!page_do_bit17_swizzling) {
+		char *vaddr = kmap_atomic(page);
+
+		if (needs_clflush_before)
+			drm_clflush_virt_range(vaddr + offset, len);
+		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
+		if (needs_clflush_after)
+			drm_clflush_virt_range(vaddr + offset, len);
+
+		kunmap_atomic(vaddr);
+	}
+	if (ret == 0)
+		return ret;
+
+	return shmem_pwrite_slow(page, offset, len, user_data,
+				 page_do_bit17_swizzling,
+				 needs_clflush_before,
+				 needs_clflush_after);
+}
+
+static int
+i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
+		      const struct drm_i915_gem_pwrite *args)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	void __user *user_data;
+	u64 remain;
+	unsigned int obj_do_bit17_swizzling;
+	unsigned int partial_cacheline_write;
+	unsigned int needs_clflush;
+	unsigned int offset, idx;
+	int ret;
+
+	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
 	if (ret)
 		return ret;
 
-	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+	mutex_unlock(&i915->drm.struct_mutex);
+	if (ret)
+		return ret;
+
+	obj_do_bit17_swizzling = 0;
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		obj_do_bit17_swizzling = BIT(17);
+
+	/* If we don't overwrite a cacheline completely we need to be
+	 * careful to have up-to-date data by first clflushing. Don't
+	 * overcomplicate things and flush the entire page.
+	 */
+	partial_cacheline_write = 0;
+	if (needs_clflush & CLFLUSH_BEFORE)
+		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
+
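
A worked instance of the mask set up above, assuming the typical 64-byte clflush size (values illustrative, not from the patch):

	/* With boot_cpu_data.x86_clflush_size == 64, partial_cacheline_write
	 * is 63, and (offset | length) & 63 is non-zero whenever a copy does
	 * not both start and end on a cacheline boundary: e.g. offset == 128,
	 * length == 100 gives (128 | 100) & 63 == 36, so the pre-write
	 * clflush is needed.
	 */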
 	user_data = u64_to_user_ptr(args->data_ptr);
-	offset = args->offset;
 	remain = args->size;
+	offset = offset_in_page(args->offset);
+	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+		struct page *page = i915_gem_object_get_page(obj, idx);
+		int length;
 
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
-			 offset >> PAGE_SHIFT) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		int partial_cacheline_write;
+		length = remain;
+		if (offset + length > PAGE_SIZE)
+			length = PAGE_SIZE - offset;
 
-		if (remain <= 0)
+		ret = shmem_pwrite(page, offset, length, user_data,
+				   page_to_phys(page) & obj_do_bit17_swizzling,
+				   (offset | length) & partial_cacheline_write,
+				   needs_clflush & CLFLUSH_AFTER);
+		if (ret)
 			break;
 
-		/* Operation in this page
-		 *
-		 * shmem_page_offset = offset within page in shmem file
-		 * page_length = bytes to copy for this page
-		 */
-		shmem_page_offset = offset_in_page(offset);
-
-		page_length = remain;
-		if ((shmem_page_offset + page_length) > PAGE_SIZE)
-			page_length = PAGE_SIZE - shmem_page_offset;
-
-		/* If we don't overwrite a cacheline completely we need to be
-		 * careful to have up-to-date data by first clflushing. Don't
-		 * overcomplicate things and flush the entire patch. */
-		partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
-			((shmem_page_offset | page_length)
-				& (boot_cpu_data.x86_clflush_size - 1));
-
-		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
-			(page_to_phys(page) & (1 << 17)) != 0;
-
-		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
-					user_data, page_do_bit17_swizzling,
-					partial_cacheline_write,
-					needs_clflush & CLFLUSH_AFTER);
-		if (ret == 0)
-			goto next_page;
-
-		hit_slowpath = 1;
-		mutex_unlock(&dev->struct_mutex);
-		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
-					user_data, page_do_bit17_swizzling,
-					partial_cacheline_write,
-					needs_clflush & CLFLUSH_AFTER);
-
-		mutex_lock(&dev->struct_mutex);
-
-		if (ret)
-			goto out;
-
-next_page:
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
+		remain -= length;
+		user_data += length;
+		offset = 0;
 	}
 
-out:
-	i915_gem_obj_finish_shmem_access(obj);
-
-	if (hit_slowpath) {
-		/*
-		 * Fixup: Flush cpu caches in case we didn't flush the dirty
-		 * cachelines in-line while writing and the object moved
-		 * out of the cpu write domain while we've dropped the lock.
-		 */
-		if (!(needs_clflush & CLFLUSH_AFTER) &&
-		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-			if (i915_gem_clflush_object(obj, obj->pin_display))
-				needs_clflush |= CLFLUSH_AFTER;
-		}
-	}
-
-	if (needs_clflush & CLFLUSH_AFTER)
-		i915_gem_chipset_flush(to_i915(dev));
-
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+	i915_gem_obj_finish_shmem_access(obj);
 	return ret;
 }
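
The partial_cacheline_write mask hoisted out of the loop above is a neat trick: with mask = x86_clflush_size - 1, the expression (offset | length) & mask is nonzero exactly when either value is not cacheline aligned, i.e. when the write can leave a partially covered cacheline that must be clflushed first. A standalone sketch of the test (a 64-byte cacheline is assumed purely for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool partial_cacheline_write(unsigned int offset, unsigned int length,
				    unsigned int clflush_size)
{
	/* A set low bit in either operand means a partial line is touched. */
	return ((offset | length) & (clflush_size - 1)) != 0;
}

int main(void)
{
	printf("%d\n", partial_cacheline_write(0, 128, 64));	/* 0: aligned */
	printf("%d\n", partial_cacheline_write(32, 128, 64));	/* 1: offset */
	printf("%d\n", partial_cacheline_write(0, 100, 64));	/* 1: length */
	return 0;
}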
 
@@ -1417,7 +1353,6 @@ int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_pwrite *args = data;
 	struct drm_i915_gem_object *obj;
 	int ret;
@@ -1430,13 +1365,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		       args->size))
 		return -EFAULT;
 
-	if (likely(!i915.prefault_disable)) {
-		ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
-						   args->size);
-		if (ret)
-			return -EFAULT;
-	}
-
 	obj = i915_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
@@ -1450,15 +1378,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
-	ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT,
+				   to_rps_client(file));
 	if (ret)
 		goto err;
 
-	intel_runtime_pm_get(dev_priv);
-
-	ret = i915_mutex_lock_interruptible(dev);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err_rpm;
+		goto err;
 
 	ret = -EFAULT;
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -1468,30 +1398,23 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * perspective, requiring manual detiling by the client.
 	 */
 	if (!i915_gem_object_has_struct_page(obj) ||
-	    cpu_write_needs_clflush(obj)) {
-		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
+	    cpu_write_needs_clflush(obj))
 		/* Note that the gtt paths might fail with non-page-backed user
 		 * pointers (e.g. gtt mappings when moving data between
-		 * textures). Fallback to the shmem path in that case. */
-	}
+		 * textures). Fall back to the shmem path in that case.
+		 */
+		ret = i915_gem_gtt_pwrite_fast(obj, args);
 
 	if (ret == -EFAULT || ret == -ENOSPC) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
 		else
-			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+			ret = i915_gem_shmem_pwrite(obj, args);
 	}
 
-	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
-	intel_runtime_pm_put(dev_priv);
-
-	return ret;
-
-err_rpm:
-	intel_runtime_pm_put(dev_priv);
+	i915_gem_object_unpin_pages(obj);
 err:
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
 
@@ -1502,6 +1425,30 @@ write_origin(struct drm_i915_gem_object *obj, unsigned domain)
 		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
 }
 
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915;
+	struct list_head *list;
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!i915_vma_is_ggtt(vma))
+			continue;
+
+		if (i915_vma_is_active(vma))
+			continue;
+
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
+		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	}
+
+	i915 = to_i915(obj->base.dev);
+	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
+	list_move_tail(&obj->global_link, list);
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -1517,7 +1464,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	uint32_t read_domains = args->read_domains;
 	uint32_t write_domain = args->write_domain;
-	int ret;
+	int err;
 
 	/* Only handle setting domains to types used by the CPU. */
 	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
@@ -1537,29 +1484,48 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
 	 */
-	ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
-	if (ret)
-		goto err;
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   (write_domain ? I915_WAIT_ALL : 0),
+				   MAX_SCHEDULE_TIMEOUT,
+				   to_rps_client(file));
+	if (err)
+		goto out;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto err;
+	/* Flush and acquire obj->pages so that we are coherent through
+	 * direct access in memory with previous cached writes through
+	 * shmemfs and that our cache domain tracking remains valid.
+	 * For example, if the obj->filp was moved to swap without us
+	 * being notified and releasing the pages, we would mistakenly
+	 * continue to assume that the obj remained out of the CPU cached
+	 * domain.
+	 */
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out;
+
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
+		goto out_unpin;
 
 	if (read_domains & I915_GEM_DOMAIN_GTT)
-		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+		err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 	else
-		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+		err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+
+	/* And bump the LRU for this access */
+	i915_gem_object_bump_inactive_ggtt(obj);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	if (write_domain != 0)
 		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
+out_unpin:
+	i915_gem_object_unpin_pages(obj);
+out:
 	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-
-err:
-	i915_gem_object_put_unlocked(obj);
-	return ret;
+	return err;
 }
 
 /**
@@ -1589,7 +1555,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 		}
 	}
 
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return err;
 }
 
@@ -1635,7 +1601,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	 * pages from.
 	 */
 	if (!obj->base.filp) {
-		i915_gem_object_put_unlocked(obj);
+		i915_gem_object_put(obj);
 		return -EINVAL;
 	}
 
@@ -1647,7 +1613,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		struct vm_area_struct *vma;
 
 		if (down_write_killable(&mm->mmap_sem)) {
-			i915_gem_object_put_unlocked(obj);
+			i915_gem_object_put(obj);
 			return -EINTR;
 		}
 		vma = find_vma(mm, addr);
@@ -1661,7 +1627,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		/* This may race, but that's ok, it only gets set */
 		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
 	}
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	if (IS_ERR((void *)addr))
 		return addr;
 
@@ -1773,7 +1739,14 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	 * repeat the flush holding the lock in the normal manner to catch cases
 	 * where we are gazumped.
 	 */
-	ret = __unsafe_wait_rendering(obj, NULL, !write);
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT,
+				   NULL);
+	if (ret)
+		goto err;
+
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		goto err;
 
@@ -1806,15 +1779,14 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 		/* Use a partial view if it is bigger than available space */
 		chunk_size = MIN_CHUNK_PAGES;
 		if (i915_gem_object_is_tiled(obj))
-			chunk_size = max(chunk_size, tile_row_pages(obj));
+			chunk_size = roundup(chunk_size, tile_row_pages(obj));
 
 		memset(&view, 0, sizeof(view));
 		view.type = I915_GGTT_VIEW_PARTIAL;
 		view.params.partial.offset = rounddown(page_offset, chunk_size);
 		view.params.partial.size =
 			min_t(unsigned int, chunk_size,
-			      (area->vm_end - area->vm_start) / PAGE_SIZE -
-			      view.params.partial.offset);
+			      vma_pages(area) - view.params.partial.offset);
 
 		/* If the partial covers the entire object, just create a
 		 * normal VMA.
@@ -1842,22 +1814,25 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	if (ret)
 		goto err_unpin;
 
+	/* Mark as being mmapped into userspace for later revocation */
+	assert_rpm_wakelock_held(dev_priv);
+	if (list_empty(&obj->userfault_link))
+		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+
 	/* Finally, remap it using the new GTT offset */
 	ret = remap_io_mapping(area,
 			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
 			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
 			       min_t(u64, vma->size, area->vm_end - area->vm_start),
 			       &ggtt->mappable);
-	if (ret)
-		goto err_unpin;
 
-	obj->fault_mappable = true;
 err_unpin:
 	__i915_vma_unpin(vma);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
 	intel_runtime_pm_put(dev_priv);
+	i915_gem_object_unpin_pages(obj);
 err:
 	switch (ret) {
 	case -EIO:
@@ -1919,15 +1894,23 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
 	/* Serialisation between user GTT access and our code depends upon
 	 * revoking the CPU's PTE whilst the mutex is held. The next user
 	 * pagefault then has to wait until we release the mutex.
+	 *
+	 * Note that RPM complicates matters somewhat by adding the further
+	 * requirement that operations on the GGTT be made while holding the
+	 * RPM wakeref.
 	 */
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	intel_runtime_pm_get(i915);
 
-	if (!obj->fault_mappable)
-		return;
+	if (list_empty(&obj->userfault_link))
+		goto out;
 
+	list_del_init(&obj->userfault_link);
 	drm_vma_node_unmap(&obj->base.vma_node,
 			   obj->base.dev->anon_inode->i_mapping);
 
@@ -1940,16 +1923,45 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	 */
 	wmb();
 
-	obj->fault_mappable = false;
+out:
+	intel_runtime_pm_put(i915);
 }
 
-void
-i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj, *on;
+	int i;
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-		i915_gem_release_mmap(obj);
+	/*
+	 * Only called during RPM suspend. All users of the userfault_list
+	 * must hold an RPM wakeref to ensure that this cannot run
+	 * concurrently with them (they rely on the struct_mutex for
+	 * protection amongst themselves).
+	 */
+
+	list_for_each_entry_safe(obj, on,
+				 &dev_priv->mm.userfault_list, userfault_link) {
+		list_del_init(&obj->userfault_link);
+		drm_vma_node_unmap(&obj->base.vma_node,
+				   obj->base.dev->anon_inode->i_mapping);
+	}
+
+	/* The fences will be lost when the device powers down. If any were
+	 * in use by hardware (i.e. they are pinned), we should not be powering
+	 * down! All other fences will be reacquired by the user upon waking.
+	 */
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+		if (WARN_ON(reg->pin_count))
+			continue;
+
+		if (!reg->vma)
+			continue;
+
+		GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+		reg->dirty = true;
+	}
 }
 
 /**
@@ -2063,7 +2075,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
 	if (ret == 0)
 		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
 
@@ -2106,16 +2118,18 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	 * backing pages, *now*.
 	 */
 	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
-	obj->madv = __I915_MADV_PURGED;
+	obj->mm.madv = __I915_MADV_PURGED;
 }
 
 /* Try to discard unwanted pages */
-static void
-i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 {
 	struct address_space *mapping;
 
-	switch (obj->madv) {
+	lockdep_assert_held(&obj->mm.lock);
+	GEM_BUG_ON(obj->mm.pages);
+
+	switch (obj->mm.madv) {
 	case I915_MADV_DONTNEED:
 		i915_gem_object_truncate(obj);
 	case __I915_MADV_PURGED:
@@ -2130,85 +2144,95 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
+			      struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
-	int ret;
 
-	BUG_ON(obj->madv == __I915_MADV_PURGED);
+	__i915_gem_object_release_shmem(obj);
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, true);
-	if (WARN_ON(ret)) {
-		/* In the event of a disaster, abandon all caches and
-		 * hope for the best.
-		 */
-		i915_gem_clflush_object(obj, true);
-		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
-
-	i915_gem_gtt_finish_object(obj);
+	i915_gem_gtt_finish_pages(obj, pages);
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_save_bit_17_swizzle(obj);
+		i915_gem_object_save_bit_17_swizzle(obj, pages);
 
-	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
-
-	for_each_sgt_page(page, sgt_iter, obj->pages) {
-		if (obj->dirty)
+	for_each_sgt_page(page, sgt_iter, pages) {
+		if (obj->mm.dirty)
 			set_page_dirty(page);
 
-		if (obj->madv == I915_MADV_WILLNEED)
+		if (obj->mm.madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
 
 		put_page(page);
 	}
-	obj->dirty = 0;
+	obj->mm.dirty = false;
 
-	sg_free_table(obj->pages);
-	kfree(obj->pages);
+	sg_free_table(pages);
+	kfree(pages);
 }
 
-int
-i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 {
-	const struct drm_i915_gem_object_ops *ops = obj->ops;
+	struct radix_tree_iter iter;
+	void **slot;
 
-	if (obj->pages == NULL)
-		return 0;
+	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+}
 
-	if (obj->pages_pin_count)
-		return -EBUSY;
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass)
+{
+	struct sg_table *pages;
+
+	if (i915_gem_object_has_pinned_pages(obj))
+		return;
 
 	GEM_BUG_ON(obj->bind_count);
+	if (!READ_ONCE(obj->mm.pages))
+		return;
+
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	mutex_lock_nested(&obj->mm.lock, subclass);
+	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+		goto unlock;
 
 	/* ->put_pages might need to allocate memory for the bit17 swizzle
 	 * array, hence protect them from being reaped by removing them from gtt
 	 * lists early. */
-	list_del(&obj->global_list);
+	pages = fetch_and_zero(&obj->mm.pages);
+	GEM_BUG_ON(!pages);
 
-	if (obj->mapping) {
+	if (obj->mm.mapping) {
 		void *ptr;
 
-		ptr = ptr_mask_bits(obj->mapping);
+		ptr = ptr_mask_bits(obj->mm.mapping);
 		if (is_vmalloc_addr(ptr))
 			vunmap(ptr);
 		else
 			kunmap(kmap_to_page(ptr));
 
-		obj->mapping = NULL;
+		obj->mm.mapping = NULL;
 	}
 
-	ops->put_pages(obj);
-	obj->pages = NULL;
+	__i915_gem_object_reset_page_iter(obj);
 
-	i915_gem_object_invalidate(obj);
-
-	return 0;
+	obj->ops->put_pages(obj, pages);
+unlock:
+	mutex_unlock(&obj->mm.lock);
 }
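
fetch_and_zero() above is the driver's take-ownership idiom: with obj->mm.lock held it returns the old obj->mm.pages pointer and leaves NULL behind, so the unlocked READ_ONCE(obj->mm.pages) check earlier in the function sees a consistent "no pages" state. Roughly, as a sketch of the idiom (a GNU C statement expression; not a verbatim copy of the driver's macro):

#define fetch_and_zero(ptr) ({				\
	typeof(*(ptr)) __val = *(ptr);			\
	*(ptr) = (typeof(*(ptr)))0;			\
	__val;						\
})

The mutex_lock_nested(&obj->mm.lock, subclass) in the same function is there because, as the comment notes, the shrinker can call this path on one object while another object's mm.lock is already held; the lockdep subclass marks that nesting as legitimate.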
 
-static int
+static unsigned int swiotlb_max_size(void)
+{
+#if IS_ENABLED(CONFIG_SWIOTLB)
+	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
+#else
+	return 0;
+#endif
+}
+
+static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2219,6 +2243,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
+	unsigned int max_segment;
 	int ret;
 	gfp_t gfp;
 
@@ -2226,17 +2251,21 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
 	 * a GPU cache
 	 */
-	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+	max_segment = swiotlb_max_size();
+	if (!max_segment)
+		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	page_count = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
 		kfree(st);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	/* Get the list of pages out of our struct file.  They'll be pinned
@@ -2264,22 +2293,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			 * our own buffer, now let the real VM do its job and
 			 * go down in flames if truly OOM.
 			 */
-			i915_gem_shrink_all(dev_priv);
 			page = shmem_read_mapping_page(mapping, i);
 			if (IS_ERR(page)) {
 				ret = PTR_ERR(page);
 				goto err_pages;
 			}
 		}
-#ifdef CONFIG_SWIOTLB
-		if (swiotlb_nr_tbl()) {
-			st->nents++;
-			sg_set_page(sg, page, PAGE_SIZE, 0);
-			sg = sg_next(sg);
-			continue;
-		}
-#endif
-		if (!i || page_to_pfn(page) != last_pfn + 1) {
+		if (!i ||
+		    sg->length >= max_segment ||
+		    page_to_pfn(page) != last_pfn + 1) {
 			if (i)
 				sg = sg_next(sg);
 			st->nents++;
@@ -2292,24 +2314,17 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 		/* Check that the i965g/gm workaround works. */
 		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
 	}
-#ifdef CONFIG_SWIOTLB
-	if (!swiotlb_nr_tbl())
-#endif
+	if (sg) /* loop terminated early; short sg table */
 		sg_mark_end(sg);
-	obj->pages = st;
 
-	ret = i915_gem_gtt_prepare_object(obj);
+	ret = i915_gem_gtt_prepare_pages(obj, st);
 	if (ret)
 		goto err_pages;
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_do_bit_17_swizzle(obj);
+		i915_gem_object_do_bit_17_swizzle(obj, st);
 
-	if (i915_gem_object_is_tiled(obj) &&
-	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
-		i915_gem_object_pin_pages(obj);
-
-	return 0;
+	return st;
 
 err_pages:
 	sg_mark_end(sg);
@@ -2329,51 +2344,81 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (ret == -ENOSPC)
 		ret = -ENOMEM;
 
-	return ret;
+	return ERR_PTR(ret);
 }
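
Two changes combine in the hunk above: the open-coded CONFIG_SWIOTLB special case is replaced by a max_segment cap (page-aligned, derived from swiotlb_nr_tbl(), or effectively unlimited when SWIOTLB is disabled), and the loop now opens a new scatterlist entry whenever pages stop being physically contiguous or the current entry would reach that cap. The same run-building logic over a plain array of page frame numbers, as a standalone sketch (the pfn values are made up):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pfns[] = { 10, 11, 12, 40, 41, 99 };
	unsigned long max_segment = 2 * PAGE_SIZE;
	unsigned long start = 0, len = 0;
	unsigned int i;

	for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++) {
		/* Same test as above: first page, segment full, or the
		 * run of physically contiguous pages was broken. */
		if (i == 0 || len >= max_segment || pfns[i] != pfns[i - 1] + 1) {
			if (i)
				printf("segment: pfn %lu, %lu bytes\n",
				       start, len);
			start = pfns[i];
			len = 0;
		}
		len += PAGE_SIZE;
	}
	printf("segment: pfn %lu, %lu bytes\n", start, len);
	return 0;
}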
 
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_get_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_put_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int
-i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				 struct sg_table *pages)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	const struct drm_i915_gem_object_ops *ops = obj->ops;
-	int ret;
+	lockdep_assert_held(&obj->mm.lock);
 
-	if (obj->pages)
-		return 0;
+	obj->mm.get_page.sg_pos = pages->sgl;
+	obj->mm.get_page.sg_idx = 0;
 
-	if (obj->madv != I915_MADV_WILLNEED) {
+	obj->mm.pages = pages;
+
+	if (i915_gem_object_is_tiled(obj) &&
+	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+		GEM_BUG_ON(obj->mm.quirked);
+		__i915_gem_object_pin_pages(obj);
+		obj->mm.quirked = true;
+	}
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *pages;
+
+	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
 		DRM_DEBUG("Attempting to obtain a purgeable object\n");
 		return -EFAULT;
 	}
 
-	BUG_ON(obj->pages_pin_count);
+	pages = obj->ops->get_pages(obj);
+	if (unlikely(IS_ERR(pages)))
+		return PTR_ERR(pages);
 
-	ret = ops->get_pages(obj);
-	if (ret)
-		return ret;
-
-	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-
-	obj->get_page.sg = obj->pages->sgl;
-	obj->get_page.last = 0;
-
+	__i915_gem_object_set_pages(obj, pages);
 	return 0;
 }
 
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	err = mutex_lock_interruptible(&obj->mm.lock);
+	if (err)
+		return err;
+
+	if (unlikely(!obj->mm.pages)) {
+		err = ____i915_gem_object_get_pages(obj);
+		if (err)
+			goto unlock;
+
+		smp_mb__before_atomic();
+	}
+	atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+	mutex_unlock(&obj->mm.lock);
+	return err;
+}
+
 /* The 'mapping' part of i915_gem_object_pin_map() below */
 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 				 enum i915_map_type type)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
-	struct sg_table *sgt = obj->pages;
+	struct sg_table *sgt = obj->mm.pages;
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	struct page *stack_pages[32];
@@ -2424,21 +2469,31 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	void *ptr;
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
 	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = mutex_lock_interruptible(&obj->mm.lock);
 	if (ret)
 		return ERR_PTR(ret);
 
-	i915_gem_object_pin_pages(obj);
-	pinned = obj->pages_pin_count > 1;
+	pinned = true;
+	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+		if (unlikely(!obj->mm.pages)) {
+			ret = ____i915_gem_object_get_pages(obj);
+			if (ret)
+				goto err_unlock;
 
-	ptr = ptr_unpack_bits(obj->mapping, has_type);
+			smp_mb__before_atomic();
+		}
+		atomic_inc(&obj->mm.pages_pin_count);
+		pinned = false;
+	}
+	GEM_BUG_ON(!obj->mm.pages);
+
+	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
 	if (ptr && has_type != type) {
 		if (pinned) {
 			ret = -EBUSY;
-			goto err;
+			goto err_unpin;
 		}
 
 		if (is_vmalloc_addr(ptr))
@@ -2446,59 +2501,28 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		else
 			kunmap(kmap_to_page(ptr));
 
-		ptr = obj->mapping = NULL;
+		ptr = obj->mm.mapping = NULL;
 	}
 
 	if (!ptr) {
 		ptr = i915_gem_object_map(obj, type);
 		if (!ptr) {
 			ret = -ENOMEM;
-			goto err;
+			goto err_unpin;
 		}
 
-		obj->mapping = ptr_pack_bits(ptr, type);
+		obj->mm.mapping = ptr_pack_bits(ptr, type);
 	}
 
+out_unlock:
+	mutex_unlock(&obj->mm.lock);
 	return ptr;
 
-err:
-	i915_gem_object_unpin_pages(obj);
-	return ERR_PTR(ret);
-}
-
-static void
-i915_gem_object_retire__write(struct i915_gem_active *active,
-			      struct drm_i915_gem_request *request)
-{
-	struct drm_i915_gem_object *obj =
-		container_of(active, struct drm_i915_gem_object, last_write);
-
-	intel_fb_obj_flush(obj, true, ORIGIN_CS);
-}
-
-static void
-i915_gem_object_retire__read(struct i915_gem_active *active,
-			     struct drm_i915_gem_request *request)
-{
-	int idx = request->engine->id;
-	struct drm_i915_gem_object *obj =
-		container_of(active, struct drm_i915_gem_object, last_read[idx]);
-
-	GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
-
-	i915_gem_object_clear_active(obj, idx);
-	if (i915_gem_object_is_active(obj))
-		return;
-
-	/* Bump our place on the bound list to keep it roughly in LRU order
-	 * so that we don't steal from recently used but inactive objects
-	 * (unless we are forced to ofc!)
-	 */
-	if (obj->bind_count)
-		list_move_tail(&obj->global_list,
-			       &request->i915->mm.bound_list);
-
-	i915_gem_object_put(obj);
+err_unpin:
+	atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+	ptr = ERR_PTR(ret);
+	goto out_unlock;
 }
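
i915_gem_object_pin_map() above and __i915_gem_object_get_pages() before it share one locking scheme: atomic_inc_not_zero() on mm.pages_pin_count is the lock-free fast path, valid because an existing pin guarantees the pages cannot be released underneath us, and only the zero case takes obj->mm.lock to populate the pages. A userspace sketch of the pattern with C11 atomics and a pthread mutex (struct object and its fields are illustrative stand-ins, not the driver's types):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct object {
	pthread_mutex_t lock;
	void *pages;			/* NULL until first pin */
	atomic_int pages_pin_count;
};

/* Lock-free fast path, the shape of atomic_inc_not_zero(): succeed
 * without the mutex whenever at least one pin is already held. */
static int pin_fast(struct object *obj)
{
	int old = atomic_load(&obj->pages_pin_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->pages_pin_count,
						 &old, old + 1))
			return 1;
	}
	return 0;
}

static int pin_pages(struct object *obj)
{
	if (pin_fast(obj))
		return 0;

	/* Slow path: the first pin populates the pages under the lock. */
	pthread_mutex_lock(&obj->lock);
	if (!obj->pages)
		obj->pages = malloc(4096);	/* stand-in for population */
	if (obj->pages)
		atomic_fetch_add(&obj->pages_pin_count, 1);
	pthread_mutex_unlock(&obj->lock);
	return obj->pages ? 0 : -1;
}

static void unpin_pages(struct object *obj)
{
	atomic_fetch_sub(&obj->pages_pin_count, 1);
}

int main(void)
{
	struct object obj = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	pin_pages(&obj);	/* slow path: allocates */
	pin_pages(&obj);	/* fast path: bare increment */
	unpin_pages(&obj);
	unpin_pages(&obj);
	free(obj.pages);
	return 0;
}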
 
 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2545,13 +2569,10 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	 * extra delay for a recent interrupt is pointless. Hence, we do
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
 	 */
-	list_for_each_entry(request, &engine->request_list, link) {
-		if (i915_gem_request_completed(request))
+	list_for_each_entry(request, &engine->timeline->requests, link) {
+		if (__i915_gem_request_completed(request))
 			continue;
 
-		if (!i915_sw_fence_done(&request->submit))
-			break;
-
 		return request;
 	}
 
@@ -2579,10 +2600,9 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	struct i915_gem_context *incomplete_ctx;
+	struct intel_timeline *timeline;
 	bool ring_hung;
 
-	/* Ensure irq handler finishes, and not run again. */
-	tasklet_kill(&engine->irq_tasklet);
 	if (engine->irq_seqno_barrier)
 		engine->irq_seqno_barrier(engine);
 
@@ -2591,12 +2611,15 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 		return;
 
 	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
+		ring_hung = false;
+
 	i915_set_reset_status(request->ctx, ring_hung);
 	if (!ring_hung)
 		return;
 
 	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
-			 engine->name, request->fence.seqno);
+			 engine->name, request->global_seqno);
 
 	/* Setup the CS to resume from the breadcrumb of the hung request */
 	engine->reset_hw(engine, request);
@@ -2613,18 +2636,25 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 	if (i915_gem_context_is_default(incomplete_ctx))
 		return;
 
-	list_for_each_entry_continue(request, &engine->request_list, link)
+	list_for_each_entry_continue(request, &engine->timeline->requests, link)
 		if (request->ctx == incomplete_ctx)
 			reset_request(request);
+
+	timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+	list_for_each_entry(request, &timeline->requests, link)
+		reset_request(request);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	i915_gem_retire_requests(dev_priv);
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		i915_gem_reset_engine(engine);
 
 	i915_gem_restore_fences(&dev_priv->drm);
@@ -2649,7 +2679,8 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
 	 * (lockless) lookup doesn't try and wait upon the request as we
 	 * reset it.
 	 */
-	intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+	intel_engine_init_global_seqno(engine,
+				       intel_engine_last_submit(engine));
 
 	/*
 	 * Clear the execlists queue up before freeing the requests, as those
@@ -2665,19 +2696,18 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
 		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
 		spin_unlock(&engine->execlist_lock);
 	}
-
-	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
 
 	i915_gem_context_lost(dev_priv);
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		i915_gem_cleanup_engine(engine);
 	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
@@ -2716,12 +2746,20 @@ i915_gem_idle_work_handler(struct work_struct *work)
 		container_of(work, typeof(*dev_priv), gt.idle_work.work);
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	bool rearm_hangcheck;
 
 	if (!READ_ONCE(dev_priv->gt.awake))
 		return;
 
-	if (READ_ONCE(dev_priv->gt.active_engines))
+	/*
+	 * Wait for the last execlists context to complete, but bail out in
+	 * case a new request is submitted.
+	 * new request is submitted.
+	 */
+	wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
+		 intel_execlists_idle(dev_priv), 10);
+
+	if (READ_ONCE(dev_priv->gt.active_requests))
 		return;
 
 	rearm_hangcheck =
@@ -2735,10 +2773,20 @@ i915_gem_idle_work_handler(struct work_struct *work)
 		goto out_rearm;
 	}
 
-	if (dev_priv->gt.active_engines)
+	/*
+	 * A new request was retired after this work handler started; extend
+	 * the active period until the next instance of the work.
+	 */
+	if (work_pending(work))
 		goto out_unlock;
 
-	for_each_engine(engine, dev_priv)
+	if (dev_priv->gt.active_requests)
+		goto out_unlock;
+
+	if (wait_for(intel_execlists_idle(dev_priv), 10))
+		DRM_ERROR("Timeout waiting for engines to idle\n");
+
+	for_each_engine(engine, dev_priv, id)
 		i915_gem_batch_pool_fini(&engine->batch_pool);
 
 	GEM_BUG_ON(!dev_priv->gt.awake);
@@ -2768,9 +2816,26 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
 		if (vma->vm->file == fpriv)
 			i915_vma_close(vma);
+
+	if (i915_gem_object_is_active(obj) &&
+	    !i915_gem_object_has_active_reference(obj)) {
+		i915_gem_object_set_active_reference(obj);
+		i915_gem_object_get(obj);
+	}
 	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
+static unsigned long to_wait_timeout(s64 timeout_ns)
+{
+	if (timeout_ns < 0)
+		return MAX_SCHEDULE_TIMEOUT;
+
+	if (timeout_ns == 0)
+		return 0;
+
+	return nsecs_to_jiffies_timeout(timeout_ns);
+}
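
to_wait_timeout() above encodes the wait-ioctl's uAPI convention: a negative timeout means wait indefinitely, zero means just poll the current state, and a positive value becomes a bounded wait in scheduler ticks. A userspace-compilable sketch of the same mapping (the HZ value and the round-up arithmetic are illustrative assumptions; in the kernel, nsecs_to_jiffies_timeout() takes care that the wait is never shorter than requested):

#include <limits.h>
#include <stdio.h>

#define HZ			250L	/* illustrative tick rate */
#define MAX_SCHEDULE_TIMEOUT	LONG_MAX
#define NSEC_PER_SEC		1000000000LL

static long wait_timeout(long long timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;	/* wait forever */
	if (timeout_ns == 0)
		return 0;			/* just poll */
	/* round up so we never wait for less than requested */
	return (long)((timeout_ns * HZ + NSEC_PER_SEC - 1) / NSEC_PER_SEC);
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       wait_timeout(-1),	/* LONG_MAX */
	       wait_timeout(0),		/* 0 */
	       wait_timeout(4000000));	/* one tick at HZ=250 */
	return 0;
}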
+
 /**
  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  * @dev: drm device pointer
@@ -2799,10 +2864,9 @@ int
 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct drm_i915_gem_wait *args = data;
-	struct intel_rps_client *rps = to_rps_client(file);
 	struct drm_i915_gem_object *obj;
-	unsigned long active;
-	int idx, ret = 0;
+	ktime_t start;
+	long ret;
 
 	if (args->flags != 0)
 		return -EINVAL;
@@ -2811,17 +2875,20 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (!obj)
 		return -ENOENT;
 
-	active = __I915_BO_ACTIVE(obj);
-	for_each_active(active, idx) {
-		s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
-		ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
-						    I915_WAIT_INTERRUPTIBLE,
-						    timeout, rps);
-		if (ret)
-			break;
+	start = ktime_get();
+
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+				   to_wait_timeout(args->timeout_ns),
+				   to_rps_client(file));
+
+	if (args->timeout_ns > 0) {
+		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
+		if (args->timeout_ns < 0)
+			args->timeout_ns = 0;
 	}
 
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
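
The ktime bookkeeping in the hunk above is what makes the wait restartable: the time actually consumed is subtracted from args->timeout_ns before returning, clamped at zero, so a userspace retry loop never waits longer than its original budget. In isolation (trivial sketch):

#include <stdio.h>

/* Clamp-at-zero budget update, as done for args->timeout_ns above. */
static long long remaining_budget(long long timeout_ns, long long elapsed_ns)
{
	timeout_ns -= elapsed_ns;
	return timeout_ns > 0 ? timeout_ns : 0;
}

int main(void)
{
	printf("%lld\n", remaining_budget(1000000, 250000));	/* 750000 */
	printf("%lld\n", remaining_budget(1000000, 2000000));	/* 0 */
	return 0;
}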
 
@@ -2842,6 +2909,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	unsigned long active;
 	int ret;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	/* First wait upon any activity as retiring the request may
 	 * have side-effects such as unpinning or even unbinding this vma.
 	 */
@@ -2853,6 +2922,13 @@ int i915_vma_unbind(struct i915_vma *vma)
 		 * In order to prevent it from being recursively closed,
 		 * take a pin on the vma so that the second unbind is
 		 * aborted.
+		 *
+		 * Even more scary is that the retire callback may free
+		 * the object (last active vma). To prevent the explosion
+		 * we defer the actual object free to a worker that can
+		 * only proceed once it acquires the struct_mutex (which
+		 * we currently hold, therefore it cannot free this object
+		 * before we are finished).
 		 */
 		__i915_vma_pin(vma);
 
@@ -2877,7 +2953,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		goto destroy;
 
 	GEM_BUG_ON(obj->bind_count == 0);
-	GEM_BUG_ON(!obj->pages);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* release the fence reg _after_ flushing */
@@ -2901,7 +2977,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-	if (vma->pages != obj->pages) {
+	if (vma->pages != obj->mm.pages) {
 		GEM_BUG_ON(!vma->pages);
 		sg_free_table(vma->pages);
 		kfree(vma->pages);
@@ -2911,7 +2987,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
 	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_list,
+		list_move_tail(&obj->global_link,
 			       &to_i915(obj->base.dev)->mm.unbound_list);
 
 	/* And finally now the object is completely decoupled from this vma,
@@ -2927,17 +3003,26 @@ int i915_vma_unbind(struct i915_vma *vma)
 	return 0;
 }
 
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-			   unsigned int flags)
+static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
 {
-	struct intel_engine_cs *engine;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+{
+	struct i915_gem_timeline *tl;
 	int ret;
 
-	for_each_engine(engine, dev_priv) {
-		if (engine->last_context == NULL)
-			continue;
-
-		ret = intel_engine_idle(engine, flags);
+	list_for_each_entry(tl, &i915->gt.timelines, link) {
+		ret = wait_for_timeline(tl, flags);
 		if (ret)
 			return ret;
 	}
@@ -3033,12 +3118,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		return -E2BIG;
 	}
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
 		if (offset & (alignment - 1) || offset > end - size) {
@@ -3095,12 +3178,16 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 			goto err_unpin;
 		}
+
+		GEM_BUG_ON(vma->node.start < start);
+		GEM_BUG_ON(vma->node.start + vma->node.size > end);
 	}
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
-	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	obj->bind_count++;
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
 
 	return 0;
 
@@ -3109,23 +3196,22 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	return ret;
 }
 
-bool
-i915_gem_clflush_object(struct drm_i915_gem_object *obj,
-			bool force)
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+			     bool force)
 {
 	/* If we don't have a page list set up, then we're not pinned
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
-	if (obj->pages == NULL)
-		return false;
+	if (!obj->mm.pages)
+		return;
 
 	/*
 	 * Stolen memory is always coherent with the GPU as it is explicitly
 	 * marked as wc by the system, or the system is cache-coherent.
 	 */
 	if (obj->stolen || obj->phys_handle)
-		return false;
+		return;
 
 	/* If the GPU is snooping the contents of the CPU cache,
 	 * we do not need to manually clear the CPU cache lines.  However,
@@ -3137,14 +3223,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 */
 	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
 		obj->cache_dirty = true;
-		return false;
+		return;
 	}
 
 	trace_i915_gem_object_clflush(obj);
-	drm_clflush_sg(obj->pages);
+	drm_clflush_sg(obj->mm.pages);
 	obj->cache_dirty = false;
-
-	return true;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3173,7 +3257,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	 */
 	wmb();
 	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
-		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
+		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
 
 	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
 
@@ -3190,9 +3274,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
-	if (i915_gem_clflush_object(obj, obj->pin_display))
-		i915_gem_chipset_flush(to_i915(obj->base.dev));
-
+	i915_gem_clflush_object(obj, obj->pin_display);
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
 	obj->base.write_domain = 0;
@@ -3201,24 +3283,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 					    I915_GEM_DOMAIN_CPU);
 }
 
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (!i915_vma_is_ggtt(vma))
-			continue;
-
-		if (i915_vma_is_active(vma))
-			continue;
-
-		if (!drm_mm_node_allocated(&vma->node))
-			continue;
-
-		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	}
-}
-
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  * @obj: object to act on
@@ -3233,7 +3297,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	ret = i915_gem_object_wait_rendering(obj, !write);
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_LOCKED |
+				   (write ? I915_WAIT_ALL : 0),
+				   MAX_SCHEDULE_TIMEOUT,
+				   NULL);
 	if (ret)
 		return ret;
 
@@ -3248,7 +3319,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	 * continue to assume that the obj remained out of the CPU cached
 	 * domain.
 	 */
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		return ret;
 
@@ -3267,21 +3338,19 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
 		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-		obj->dirty = 1;
+		obj->mm.dirty = true;
 	}
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
 
-	/* And bump the LRU for this access */
-	i915_gem_object_bump_inactive_ggtt(obj);
-
+	i915_gem_object_unpin_pages(obj);
 	return 0;
 }
 
@@ -3306,6 +3375,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret = 0;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	if (obj->cache_level == cache_level)
 		goto out;
 
@@ -3350,7 +3421,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		 * If we wait upon the object, we know that all the bound
 		 * VMA are no longer active.
 		 */
-		ret = i915_gem_object_wait_rendering(obj, false);
+		ret = i915_gem_object_wait(obj,
+					   I915_WAIT_INTERRUPTIBLE |
+					   I915_WAIT_LOCKED |
+					   I915_WAIT_ALL,
+					   MAX_SCHEDULE_TIMEOUT,
+					   NULL);
 		if (ret)
 			return ret;
 
@@ -3405,10 +3481,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * object is now coherent at its new cache level (with respect
 	 * to the access domain).
 	 */
-	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
-		if (i915_gem_clflush_object(obj, true))
-			i915_gem_chipset_flush(to_i915(obj->base.dev));
-	}
+	if (obj->cache_dirty && cpu_write_needs_clflush(obj))
+		i915_gem_clflush_object(obj, true);
 
 	return 0;
 }
@@ -3418,10 +3492,14 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
+	int err = 0;
 
-	obj = i915_gem_object_lookup(file, args->handle);
-	if (!obj)
-		return -ENOENT;
+	rcu_read_lock();
+	obj = i915_gem_object_lookup_rcu(file, args->handle);
+	if (!obj) {
+		err = -ENOENT;
+		goto out;
+	}
 
 	switch (obj->cache_level) {
 	case I915_CACHE_LLC:
@@ -3437,15 +3515,15 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 		args->caching = I915_CACHING_NONE;
 		break;
 	}
-
-	i915_gem_object_put_unlocked(obj);
-	return 0;
+out:
+	rcu_read_unlock();
+	return err;
 }
 
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 	enum i915_cache_level level;
@@ -3462,23 +3540,21 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		 * cacheline, whereas normally such cachelines would get
 		 * invalidated.
 		 */
-		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
+		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
 			return -ENODEV;
 
 		level = I915_CACHE_LLC;
 		break;
 	case I915_CACHING_DISPLAY:
-		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	intel_runtime_pm_get(dev_priv);
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		goto rpm_put;
+		return ret;
 
 	obj = i915_gem_object_lookup(file, args->handle);
 	if (!obj) {
@@ -3487,13 +3563,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = i915_gem_object_set_cache_level(obj, level);
-
 	i915_gem_object_put(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
-rpm_put:
-	intel_runtime_pm_put(dev_priv);
-
 	return ret;
 }
 
@@ -3511,6 +3583,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	/* Mark the pin_display early so that we account for the
 	 * display coherency whilst setting up the cache domains.
 	 */
@@ -3526,7 +3600,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
-					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+					      HAS_WT(to_i915(obj->base.dev)) ?
+					      I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret) {
 		vma = ERR_PTR(ret);
 		goto err_unpin_display;
@@ -3543,15 +3618,27 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	if (view->type == I915_GGTT_VIEW_NORMAL)
 		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
-	if (IS_ERR(vma))
-		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+	if (IS_ERR(vma)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		unsigned int flags;
+
+		/* Valleyview is definitely limited to scanning out the first
+		 * 512MiB. Let's presume this behaviour was inherited from the
+		 * g4x display engine and that all earlier gens are similarly
+		 * limited. Testing suggests that it is a little more
+		 * complicated than this. For example, Cherryview appears quite
+		 * happy to scan out from anywhere within its global aperture.
+		 */
+		flags = 0;
+		if (HAS_GMCH_DISPLAY(i915))
+			flags = PIN_MAPPABLE;
+		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+	}
 	if (IS_ERR(vma))
 		goto err_unpin_display;
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
-	WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
-
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -3577,6 +3664,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 void
 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
+
 	if (WARN_ON(vma->obj->pin_display == 0))
 		return;
 
@@ -3588,7 +3677,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
 	i915_vma_unpin(vma);
-	WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
 }
 
 /**
@@ -3605,7 +3693,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	ret = i915_gem_object_wait_rendering(obj, !write);
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_LOCKED |
+				   (write ? I915_WAIT_ALL : 0),
+				   MAX_SCHEDULE_TIMEOUT,
+				   NULL);
 	if (ret)
 		return ret;
 
@@ -3627,7 +3722,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
 	/* If we're writing through the CPU, then the GPU read domains will
 	 * need to be invalidated at next use.
@@ -3661,11 +3756,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
 	struct drm_i915_gem_request *request, *target = NULL;
-	int ret;
-
-	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
-	if (ret)
-		return ret;
+	long ret;
 
 	/* ABI: return -EIO if already wedged */
 	if (i915_terminally_wedged(&dev_priv->gpu_error))
@@ -3692,10 +3783,12 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+	ret = i915_wait_request(target,
+				I915_WAIT_INTERRUPTIBLE,
+				MAX_SCHEDULE_TIMEOUT);
 	i915_gem_request_put(target);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 static bool
@@ -3745,7 +3838,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	mappable = (vma->node.start + fence_size <=
 		    dev_priv->ggtt.mappable_end);
 
-	if (mappable && fenceable)
+	/*
+	 * Explicitly disable for rotated VMA since the display does not
+	 * need the fence and the VMA is not accessible to other users.
+	 */
+	if (mappable && fenceable &&
+	    vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
 		vma->flags |= I915_VMA_CAN_FENCE;
 	else
 		vma->flags &= ~I915_VMA_CAN_FENCE;
@@ -3757,6 +3855,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	unsigned int bound = vma->flags;
 	int ret;
 
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
 	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
@@ -3793,10 +3892,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 alignment,
 			 u64 flags)
 {
-	struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_address_space *vm = &dev_priv->ggtt.base;
 	struct i915_vma *vma;
 	int ret;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
 	if (IS_ERR(vma))
 		return vma;
@@ -3806,6 +3908,41 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
 			return ERR_PTR(-ENOSPC);
 
+		if (flags & PIN_MAPPABLE) {
+			u32 fence_size;
+
+			fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
+							    i915_gem_object_get_tiling(obj));
+			/* If the required space is larger than the available
+			 * aperture, we will not be able to find a slot for the
+			 * object and unbinding the object now will be in
+			 * vain. Worse, doing so may cause us to ping-pong
+			 * the object in and out of the Global GTT and
+			 * waste a lot of cycles under the mutex.
+			 */
+			if (fence_size > dev_priv->ggtt.mappable_end)
+				return ERR_PTR(-E2BIG);
+
+			/* If NONBLOCK is set the caller is optimistically
+			 * trying to cache the full object within the mappable
+			 * aperture, and *must* have a fallback in place for
+			 * situations where we cannot bind the object. We
+			 * can be a little more lax here and use the fallback
+			 * more often to avoid costly migrations of ourselves
+			 * and other objects within the aperture.
+			 *
+			 * Half-the-aperture is used as a simple heuristic.
+			 * More interesting would be to search for a free
+			 * block prior to making the commitment to unbind.
+			 * That caters for the self-harm case, and with a
+			 * little more heuristics (e.g. NOFAULT, NOEVICT)
+			 * we could try to minimise harm to others.
+			 */
+			if (flags & PIN_NONBLOCK &&
+			    fence_size > dev_priv->ggtt.mappable_end / 2)
+				return ERR_PTR(-ENOSPC);
+		}
+
 		WARN(i915_vma_is_pinned(vma),
 		     "bo is already pinned in ggtt with incorrect alignment:"
 		     " offset=%08x, req.alignment=%llx,"
@@ -3852,83 +3989,42 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
 }
 
 static __always_inline unsigned int
-__busy_set_if_active(const struct i915_gem_active *active,
+__busy_set_if_active(const struct dma_fence *fence,
 		     unsigned int (*flag)(unsigned int id))
 {
-	struct drm_i915_gem_request *request;
+	struct drm_i915_gem_request *rq;
 
-	request = rcu_dereference(active->request);
-	if (!request || i915_gem_request_completed(request))
+	/* We have to check the current hw status of the fence as the uABI
+	 * guarantees forward progress. We could rely on the idle worker
+	 * to eventually flush us, but to minimise latency just ask the
+	 * hardware.
+	 *
+	 * Note we only report on the status of native fences.
+	 */
+	if (!dma_fence_is_i915(fence))
 		return 0;
 
-	/* This is racy. See __i915_gem_active_get_rcu() for an in detail
-	 * discussion of how to handle the race correctly, but for reporting
-	 * the busy state we err on the side of potentially reporting the
-	 * wrong engine as being busy (but we guarantee that the result
-	 * is at least self-consistent).
-	 *
-	 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
-	 * whilst we are inspecting it, even under the RCU read lock as we are.
-	 * This means that there is a small window for the engine and/or the
-	 * seqno to have been overwritten. The seqno will always be in the
-	 * future compared to the intended, and so we know that if that
-	 * seqno is idle (on whatever engine) our request is idle and the
-	 * return 0 above is correct.
-	 *
-	 * The issue is that if the engine is switched, it is just as likely
-	 * to report that it is busy (but since the switch happened, we know
-	 * the request should be idle). So there is a small chance that a busy
-	 * result is actually the wrong engine.
-	 *
-	 * So why don't we care?
-	 *
-	 * For starters, the busy ioctl is a heuristic that is by definition
-	 * racy. Even with perfect serialisation in the driver, the hardware
-	 * state is constantly advancing - the state we report to the user
-	 * is stale.
-	 *
-	 * The critical information for the busy-ioctl is whether the object
-	 * is idle as userspace relies on that to detect whether its next
-	 * access will stall, or if it has missed submitting commands to
-	 * the hardware allowing the GPU to stall. We never generate a
-	 * false-positive for idleness, thus busy-ioctl is reliable at the
-	 * most fundamental level, and we maintain the guarantee that a
-	 * busy object left to itself will eventually become idle (and stay
-	 * idle!).
-	 *
-	 * We allow ourselves the leeway of potentially misreporting the busy
-	 * state because that is an optimisation heuristic that is constantly
-	 * in flux. Being quickly able to detect the busy/idle state is much
-	 * more important than accurate logging of exactly which engines were
-	 * busy.
-	 *
-	 * For accuracy in reporting the engine, we could use
-	 *
-	 *	result = 0;
-	 *	request = __i915_gem_active_get_rcu(active);
-	 *	if (request) {
-	 *		if (!i915_gem_request_completed(request))
-	 *			result = flag(request->engine->exec_id);
-	 *		i915_gem_request_put(request);
-	 *	}
-	 *
-	 * but that still remains susceptible to both hardware and userspace
-	 * races. So we accept making the result of that race slightly worse,
-	 * given the rarity of the race and its low impact on the result.
-	 */
-	return flag(READ_ONCE(request->engine->exec_id));
+	/* opencode to_request() in order to avoid const warnings */
+	rq = container_of(fence, struct drm_i915_gem_request, fence);
+	if (i915_gem_request_completed(rq))
+		return 0;
+
+	return flag(rq->engine->exec_id);
 }
 
 static __always_inline unsigned int
-busy_check_reader(const struct i915_gem_active *active)
+busy_check_reader(const struct dma_fence *fence)
 {
-	return __busy_set_if_active(active, __busy_read_flag);
+	return __busy_set_if_active(fence, __busy_read_flag);
 }
 
 static __always_inline unsigned int
-busy_check_writer(const struct i915_gem_active *active)
+busy_check_writer(const struct dma_fence *fence)
 {
-	return __busy_set_if_active(active, __busy_write_id);
+	if (!fence)
+		return 0;
+
+	return __busy_set_if_active(fence, __busy_write_id);
 }
 
 int
@@ -3937,64 +4033,58 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
-	unsigned long active;
+	struct reservation_object_list *list;
+	unsigned int seq;
+	int err;
 
-	obj = i915_gem_object_lookup(file, args->handle);
+	err = -ENOENT;
+	rcu_read_lock();
+	obj = i915_gem_object_lookup_rcu(file, args->handle);
 	if (!obj)
-		return -ENOENT;
+		goto out;
 
-	args->busy = 0;
-	active = __I915_BO_ACTIVE(obj);
-	if (active) {
-		int idx;
+	/* A discrepancy here is that we do not report the status of
+	 * non-i915 fences, i.e. even though we may report the object as idle,
+	 * a call to set-domain may still stall waiting for foreign rendering.
+	 * This also means that wait-ioctl may report an object as busy,
+	 * where busy-ioctl considers it idle.
+	 *
+	 * We trade the ability to warn of foreign fences to report on which
+	 * i915 engines are active for the object.
+	 *
+	 * Alternatively, we can trade that extra information on read/write
+	 * activity with
+	 *	args->busy =
+	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
+	 * to report the overall busyness. This is what the wait-ioctl does.
+	 */
+retry:
+	seq = raw_read_seqcount(&obj->resv->seq);
 
-		/* Yes, the lookups are intentionally racy.
-		 *
-		 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
-		 * to regard the value as stale and as our ABI guarantees
-		 * forward progress, we confirm the status of each active
-		 * request with the hardware.
-		 *
-		 * Even though we guard the pointer lookup by RCU, that only
-		 * guarantees that the pointer and its contents remain
-		 * dereferencable and does *not* mean that the request we
-		 * have is the same as the one being tracked by the object.
-		 *
-		 * Consider that we lookup the request just as it is being
-		 * retired and freed. We take a local copy of the pointer,
-		 * but before we add its engine into the busy set, the other
-		 * thread reallocates it and assigns it to a task on another
-		 * engine with a fresh and incomplete seqno. Guarding against
-		 * that requires careful serialisation and reference counting,
-		 * i.e. using __i915_gem_active_get_request_rcu(). We don't,
-		 * instead we expect that if the result is busy, which engines
-		 * are busy is not completely reliable - we only guarantee
-		 * that the object was busy.
-		 */
-		rcu_read_lock();
+	/* Translate the exclusive fence to the READ *and* WRITE engine */
+	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
 
-		for_each_active(active, idx)
-			args->busy |= busy_check_reader(&obj->last_read[idx]);
+	/* Translate shared fences to READ set of engines */
+	list = rcu_dereference(obj->resv->fence);
+	if (list) {
+		unsigned int shared_count = list->shared_count, i;
 
-		/* For ABI sanity, we only care that the write engine is in
-		 * the set of read engines. This should be ensured by the
-		 * ordering of setting last_read/last_write in
-		 * i915_vma_move_to_active(), and then in reverse in retire.
-		 * However, for good measure, we always report the last_write
-		 * request as a busy read as well as being a busy write.
-		 *
-		 * We don't care that the set of active read/write engines
-		 * may change during construction of the result, as it is
-		 * equally liable to change before userspace can inspect
-		 * the result.
-		 */
-		args->busy |= busy_check_writer(&obj->last_write);
+		for (i = 0; i < shared_count; ++i) {
+			struct dma_fence *fence =
+				rcu_dereference(list->shared[i]);
 
-		rcu_read_unlock();
+			args->busy |= busy_check_reader(fence);
+		}
 	}
 
-	i915_gem_object_put_unlocked(obj);
-	return 0;
+	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+		goto retry;
+
+	err = 0;
+out:
+	rcu_read_unlock();
+	return err;
 }
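
The retry loop above is the usual seqcount read pattern: sample obj->resv->seq, read the fence pointers under RCU, and start over if a writer raced with the read. Two nuances: raw_read_seqcount() is used rather than read_seqcount_begin(), so the reader never spins waiting for a writer to finish, and the retry is skipped entirely for an idle result, an acceptable shortcut given that the ABI is racy by design. A minimal sketch of the idiom, with hypothetical names (my_obj, excl) standing in for the i915 specifics:

	#include <linux/rcupdate.h>
	#include <linux/seqlock.h>

	struct my_obj {
		seqcount_t seq;		/* bumped around fence updates */
		void __rcu *excl;	/* exclusive (write) fence, if any */
	};

	/* Lockless busy query: a torn read of a busy object is caught
	 * by the seqcount and retried; an idle answer is returned as-is.
	 */
	static bool read_busy(struct my_obj *obj)
	{
		unsigned int seq;
		bool busy;

		rcu_read_lock();
		do {
			seq = read_seqcount_begin(&obj->seq);
			busy = rcu_dereference(obj->excl) != NULL;
		} while (busy && read_seqcount_retry(&obj->seq, seq));
		rcu_read_unlock();

		return busy;
	}
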
 
 int
@@ -4011,7 +4101,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_madvise *args = data;
 	struct drm_i915_gem_object *obj;
-	int ret;
+	int err;
 
 	switch (args->madv) {
 	case I915_MADV_DONTNEED:
@@ -4021,77 +4111,100 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	    return -EINVAL;
 	}
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
 	obj = i915_gem_object_lookup(file_priv, args->handle);
-	if (!obj) {
-		ret = -ENOENT;
-		goto unlock;
-	}
+	if (!obj)
+		return -ENOENT;
 
-	if (obj->pages &&
+	err = mutex_lock_interruptible(&obj->mm.lock);
+	if (err)
+		goto out;
+
+	if (obj->mm.pages &&
 	    i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-		if (obj->madv == I915_MADV_WILLNEED)
-			i915_gem_object_unpin_pages(obj);
-		if (args->madv == I915_MADV_WILLNEED)
-			i915_gem_object_pin_pages(obj);
+		if (obj->mm.madv == I915_MADV_WILLNEED) {
+			GEM_BUG_ON(!obj->mm.quirked);
+			__i915_gem_object_unpin_pages(obj);
+			obj->mm.quirked = false;
+		}
+		if (args->madv == I915_MADV_WILLNEED) {
+			GEM_BUG_ON(obj->mm.quirked);
+			__i915_gem_object_pin_pages(obj);
+			obj->mm.quirked = true;
+		}
 	}
 
-	if (obj->madv != __I915_MADV_PURGED)
-		obj->madv = args->madv;
+	if (obj->mm.madv != __I915_MADV_PURGED)
+		obj->mm.madv = args->madv;
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
+	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
 		i915_gem_object_truncate(obj);
 
-	args->retained = obj->madv != __I915_MADV_PURGED;
+	args->retained = obj->mm.madv != __I915_MADV_PURGED;
+	mutex_unlock(&obj->mm.lock);
 
+out:
 	i915_gem_object_put(obj);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	return err;
 }
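
For reference, the other side of this ioctl: a userspace buffer cache (libdrm's bo cache is the usual consumer) marks idle buffers purgeable and revalidates them on reuse, checking 'retained' to learn whether the shrinker reaped the pages in the meantime. A hedged sketch; error handling and the drmIoctl() EINTR wrapper are elided:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Mark an idle, cached buffer as purgeable by the shrinker. */
	static int bo_mark_purgeable(int fd, uint32_t handle)
	{
		struct drm_i915_gem_madvise madv = {
			.handle = handle,
			.madv = I915_MADV_DONTNEED,
		};

		return ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
	}

	/* Reclaim a buffer before reuse; *purged tells the caller whether
	 * the backing store was lost, in which case the contents are gone
	 * and the buffer must be treated as freshly allocated.
	 */
	static int bo_mark_needed(int fd, uint32_t handle, int *purged)
	{
		struct drm_i915_gem_madvise madv = {
			.handle = handle,
			.madv = I915_MADV_WILLNEED,
		};
		int ret = ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

		if (ret == 0)
			*purged = !madv.retained;
		return ret;
	}
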
 
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops)
 {
-	int i;
+	mutex_init(&obj->mm.lock);
 
-	INIT_LIST_HEAD(&obj->global_list);
-	for (i = 0; i < I915_NUM_ENGINES; i++)
-		init_request_active(&obj->last_read[i],
-				    i915_gem_object_retire__read);
-	init_request_active(&obj->last_write,
-			    i915_gem_object_retire__write);
+	INIT_LIST_HEAD(&obj->global_link);
+	INIT_LIST_HEAD(&obj->userfault_link);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
 
 	obj->ops = ops;
 
+	reservation_object_init(&obj->__builtin_resv);
+	obj->resv = &obj->__builtin_resv;
+
 	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
-	obj->madv = I915_MADV_WILLNEED;
+
+	obj->mm.madv = I915_MADV_WILLNEED;
+	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+	mutex_init(&obj->mm.get_page.lock);
 
 	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
-	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+		 I915_GEM_OBJECT_IS_SHRINKABLE,
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
 };
 
-struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
-						  size_t size)
+/* Note we don't consider signbits :| */
+#define overflows_type(x, T) \
+	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+
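
overflows_type() reads: only when x's type is strictly wider than T can assignment truncate, and in that case any bits at or above T's width prove it. The sizeof() comparison doubles as a guard, short-circuiting the shift whenever the widths match (where the shift count would equal the operand width, which C leaves undefined); as the comment concedes, signedness mismatches slip through. A sketch of the two guards used below, wrapped for a hypothetical caller:

	/* Illustrative: reject a size whose page count cannot fit in a
	 * signed int (a widespread assumption in the driver), or one
	 * that would truncate when stored in obj->base.size (a size_t).
	 */
	static bool create_size_ok(u64 size)
	{
		if (size >> PAGE_SHIFT > INT_MAX)
			return false;

		return !overflows_type(size, size_t);
	}
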
+struct drm_i915_gem_object *
+i915_gem_object_create(struct drm_device *dev, u64 size)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	gfp_t mask;
 	int ret;
 
+	/* There is a prevalence of the assumption that we fit the object's
+	 * page count inside a 32bit _signed_ variable. Let's document this and
+	 * catch if we ever need to fix it. In the meantime, if you do spot
+	 * such a local variable, please consider fixing!
+	 */
+	if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
+		return ERR_PTR(-E2BIG);
+
+	if (overflows_type(size, obj->base.size))
+		return ERR_PTR(-E2BIG);
+
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -4101,7 +4214,7 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
 		goto fail;
 
 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
-	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+	if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
 		/* 965gm cannot relocate objects above 4GiB. */
 		mask &= ~__GFP_HIGHMEM;
 		mask |= __GFP_DMA32;
@@ -4138,7 +4251,6 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
 
 fail:
 	i915_gem_object_free(obj);
-
 	return ERR_PTR(ret);
 }
 
@@ -4150,7 +4262,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
 	 * back the contents from the GPU.
 	 */
 
-	if (obj->madv != I915_MADV_WILLNEED)
+	if (obj->mm.madv != I915_MADV_WILLNEED)
 		return false;
 
 	if (obj->base.filp == NULL)
@@ -4166,16 +4278,72 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
 	return atomic_long_read(&obj->base.filp->f_count) == 1;
 }
 
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+				    struct llist_node *freed)
 {
-	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_vma *vma, *next;
+	struct drm_i915_gem_object *obj, *on;
 
-	intel_runtime_pm_get(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_runtime_pm_get(i915);
+	llist_for_each_entry(obj, freed, freed) {
+		struct i915_vma *vma, *vn;
 
-	trace_i915_gem_object_destroy(obj);
+		trace_i915_gem_object_destroy(obj);
+
+		GEM_BUG_ON(i915_gem_object_is_active(obj));
+		list_for_each_entry_safe(vma, vn,
+					 &obj->vma_list, obj_link) {
+			GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+			GEM_BUG_ON(i915_vma_is_active(vma));
+			vma->flags &= ~I915_VMA_PIN_MASK;
+			i915_vma_close(vma);
+		}
+		GEM_BUG_ON(!list_empty(&obj->vma_list));
+		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
+
+		list_del(&obj->global_link);
+	}
+	intel_runtime_pm_put(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	llist_for_each_entry_safe(obj, on, freed, freed) {
+		GEM_BUG_ON(obj->bind_count);
+		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+
+		if (obj->ops->release)
+			obj->ops->release(obj);
+
+		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+			atomic_set(&obj->mm.pages_pin_count, 0);
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		GEM_BUG_ON(obj->mm.pages);
+
+		if (obj->base.import_attach)
+			drm_prime_gem_destroy(&obj->base, NULL);
+
+		reservation_object_fini(&obj->__builtin_resv);
+		drm_gem_object_release(&obj->base);
+		i915_gem_info_remove_obj(i915, obj->base.size);
+
+		kfree(obj->bit_17);
+		i915_gem_object_free(obj);
+	}
+}
+
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
+{
+	struct llist_node *freed;
+
+	freed = llist_del_all(&i915->mm.free_list);
+	if (unlikely(freed))
+		__i915_gem_free_objects(i915, freed);
+}
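
llist_add()/llist_del_all() form a lock-free multi-producer queue: producers push with a single cmpxchg from any context, and the consumer detaches the whole list atomically. Note that llist_add() returns true only when the list was previously empty, which is exactly what the scheduling in __i915_gem_free_object_rcu() below relies on: only the first object queued after a flush kicks the worker. The idiom in isolation, with hypothetical names:

	#include <linux/llist.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct deferred {
		struct llist_node node;
		/* ... payload ... */
	};

	static LLIST_HEAD(pending);

	/* Consumer: detach every queued node in one atomic exchange. */
	static void reap(struct work_struct *work)
	{
		struct deferred *d, *next;

		llist_for_each_entry_safe(d, next, llist_del_all(&pending), node)
			kfree(d);
	}
	static DECLARE_WORK(reaper, reap);

	/* Producer: usable from any context, even hardirq; only the
	 * transition from empty schedules the worker, so a burst of
	 * producers costs a single wakeup.
	 */
	static void defer_free(struct deferred *d)
	{
		if (llist_add(&d->node, &pending))
			schedule_work(&reaper);
	}
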
+
+static void __i915_gem_free_work(struct work_struct *work)
+{
+	struct drm_i915_private *i915 =
+		container_of(work, struct drm_i915_private, mm.free_work);
+	struct llist_node *freed;
 
 	/* All file-owned VMA should have been released by this point through
 	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
@@ -4184,47 +4352,62 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 * the GTT either for the user or for scanout). Those VMA still need to
 	 * be unbound now.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
-		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-		GEM_BUG_ON(i915_vma_is_active(vma));
-		vma->flags &= ~I915_VMA_PIN_MASK;
-		i915_vma_close(vma);
-	}
-	GEM_BUG_ON(obj->bind_count);
 
-	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
-	 * before progressing. */
-	if (obj->stolen)
-		i915_gem_object_unpin_pages(obj);
+	while ((freed = llist_del_all(&i915->mm.free_list)))
+		__i915_gem_free_objects(i915, freed);
+}
 
-	WARN_ON(atomic_read(&obj->frontbuffer_bits));
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+	struct drm_i915_gem_object *obj =
+		container_of(head, typeof(*obj), rcu);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
-	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
-	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
-	    i915_gem_object_is_tiled(obj))
-		i915_gem_object_unpin_pages(obj);
+	/* We can't simply use call_rcu() from i915_gem_free_object()
+	 * as we need to block whilst unbinding, and the call_rcu
+	 * task may be called from softirq context. So we take a
+	 * detour through a worker.
+	 */
+	if (llist_add(&obj->freed, &i915->mm.free_list))
+		schedule_work(&i915->mm.free_work);
+}
 
-	if (WARN_ON(obj->pages_pin_count))
-		obj->pages_pin_count = 0;
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+	if (obj->mm.quirked)
+		__i915_gem_object_unpin_pages(obj);
+
 	if (discard_backing_storage(obj))
-		obj->madv = I915_MADV_DONTNEED;
-	i915_gem_object_put_pages(obj);
+		obj->mm.madv = I915_MADV_DONTNEED;
 
-	BUG_ON(obj->pages);
+	/* Before we free the object, make sure any pure RCU-only
+	 * read-side critical sections are complete, e.g.
+	 * i915_gem_busy_ioctl(). For the corresponding synchronized
+	 * lookup see i915_gem_object_lookup_rcu().
+	 */
+	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
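
Between them, the two comments above describe a double deferral: call_rcu() ensures lockless readers have drained before teardown starts, and because RCU callbacks run from softirq context, where struct_mutex cannot be taken and sleeping is forbidden, the callback itself only queues the object for the worker. Stripped to its essentials (names hypothetical; this pairs with the llist sketch earlier):

	#include <linux/llist.h>
	#include <linux/rcupdate.h>
	#include <linux/workqueue.h>

	struct victim {
		struct rcu_head rcu;
		struct llist_node node;
	};

	static LLIST_HEAD(doomed);
	extern struct work_struct teardown_work;	/* does the blocking work */

	/* Softirq context, after the grace period: may not sleep, so
	 * only queue the node and kick the worker.
	 */
	static void victim_free_rcu(struct rcu_head *head)
	{
		struct victim *v = container_of(head, struct victim, rcu);

		if (llist_add(&v->node, &doomed))
			schedule_work(&teardown_work);
	}

	static void victim_release(struct victim *v)
	{
		/* Lockless readers may still hold pointers to v; all
		 * teardown must wait out the grace period first.
		 */
		call_rcu(&v->rcu, victim_free_rcu);
	}
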
 
-	if (obj->base.import_attach)
-		drm_prime_gem_destroy(&obj->base, NULL);
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	if (obj->ops->release)
-		obj->ops->release(obj);
+	GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+	if (i915_gem_object_is_active(obj))
+		i915_gem_object_set_active_reference(obj);
+	else
+		i915_gem_object_put(obj);
+}
 
-	drm_gem_object_release(&obj->base);
-	i915_gem_info_remove_obj(dev_priv, obj->base.size);
+static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	kfree(obj->bit_17);
-	i915_gem_object_free(obj);
-
-	intel_runtime_pm_put(dev_priv);
+	for_each_engine(engine, dev_priv, id)
+		GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
 }
 
 int i915_gem_suspend(struct drm_device *dev)
@@ -4255,18 +4438,46 @@ int i915_gem_suspend(struct drm_device *dev)
 		goto err;
 
 	i915_gem_retire_requests(dev_priv);
+	GEM_BUG_ON(dev_priv->gt.active_requests);
 
+	assert_kernel_context_is_current(dev_priv);
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 	flush_delayed_work(&dev_priv->gt.idle_work);
+	flush_work(&dev_priv->mm.free_work);
 
 	/* Assert that we successfully flushed all the work and
 	 * reset the GPU back to its idle, low power state.
 	 */
 	WARN_ON(dev_priv->gt.awake);
+	WARN_ON(!intel_execlists_idle(dev_priv));
+
+	/*
+	 * Neither the BIOS, ourselves or any other kernel
+	 * expects the system to be in execlists mode on startup,
+	 * so we need to reset the GPU back to legacy mode. And the only
+	 * known way to disable logical contexts is through a GPU reset.
+	 *
+	 * So in order to leave the system in a known default configuration,
+	 * always reset the GPU upon unload and suspend. Afterwards we then
+	 * clean up the GEM state tracking, flushing off the requests and
+	 * leaving the system in a known idle state.
+	 *
+	 * Note that it is of the utmost importance that the GPU is idle and
+	 * all stray writes are flushed *before* we dismantle the backing
+	 * storage for the pinned objects.
+	 *
+	 * However, since we are uncertain that resetting the GPU on older
+	 * machines is a good idea, we don't - just in case it leaves the
+	 * machine in an unusable condition.
+	 */
+	if (HAS_HW_CONTEXTS(dev)) {
+		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+		WARN_ON(reset && reset != -ENODEV);
+	}
 
 	return 0;
 
@@ -4279,6 +4490,8 @@ void i915_gem_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
+	WARN_ON(dev_priv->gt.awake);
+
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_restore_gtt_mappings(dev);
 
@@ -4302,44 +4515,42 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
 				 DISP_TILE_SURFACE_SWIZZLING);
 
-	if (IS_GEN5(dev))
+	if (IS_GEN5(dev_priv))
 		return;
 
 	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev_priv))
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-	else if (IS_GEN7(dev))
+	else if (IS_GEN7(dev_priv))
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-	else if (IS_GEN8(dev))
+	else if (IS_GEN8(dev_priv))
 		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
 	else
 		BUG();
 }
 
-static void init_unused_ring(struct drm_device *dev, u32 base)
+static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	I915_WRITE(RING_CTL(base), 0);
 	I915_WRITE(RING_HEAD(base), 0);
 	I915_WRITE(RING_TAIL(base), 0);
 	I915_WRITE(RING_START(base), 0);
 }
 
-static void init_unused_rings(struct drm_device *dev)
+static void init_unused_rings(struct drm_i915_private *dev_priv)
 {
-	if (IS_I830(dev)) {
-		init_unused_ring(dev, PRB1_BASE);
-		init_unused_ring(dev, SRB0_BASE);
-		init_unused_ring(dev, SRB1_BASE);
-		init_unused_ring(dev, SRB2_BASE);
-		init_unused_ring(dev, SRB3_BASE);
-	} else if (IS_GEN2(dev)) {
-		init_unused_ring(dev, SRB0_BASE);
-		init_unused_ring(dev, SRB1_BASE);
-	} else if (IS_GEN3(dev)) {
-		init_unused_ring(dev, PRB1_BASE);
-		init_unused_ring(dev, PRB2_BASE);
+	if (IS_I830(dev_priv)) {
+		init_unused_ring(dev_priv, PRB1_BASE);
+		init_unused_ring(dev_priv, SRB0_BASE);
+		init_unused_ring(dev_priv, SRB1_BASE);
+		init_unused_ring(dev_priv, SRB2_BASE);
+		init_unused_ring(dev_priv, SRB3_BASE);
+	} else if (IS_GEN2(dev_priv)) {
+		init_unused_ring(dev_priv, SRB0_BASE);
+		init_unused_ring(dev_priv, SRB1_BASE);
+	} else if (IS_GEN3(dev_priv)) {
+		init_unused_ring(dev_priv, PRB1_BASE);
+		init_unused_ring(dev_priv, PRB2_BASE);
 	}
 }
 
@@ -4348,20 +4559,23 @@ i915_gem_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int ret;
 
+	dev_priv->gt.last_init_time = ktime_get();
+
 	/* Double layer security blanket, see i915_gem_init() */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
-	if (IS_HASWELL(dev))
-		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+	if (IS_HASWELL(dev_priv))
+		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
 			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
-	if (HAS_PCH_NOP(dev)) {
-		if (IS_IVYBRIDGE(dev)) {
+	if (HAS_PCH_NOP(dev_priv)) {
+		if (IS_IVYBRIDGE(dev_priv)) {
 			u32 temp = I915_READ(GEN7_MSG_CTL);
 			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
 			I915_WRITE(GEN7_MSG_CTL, temp);
@@ -4380,7 +4594,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	 * will prevent c3 entry. Makes sure all unused rings
 	 * are totally idle.
 	 */
-	init_unused_rings(dev);
+	init_unused_rings(dev_priv);
 
 	BUG_ON(!dev_priv->kernel_context);
 
@@ -4391,7 +4605,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	}
 
 	/* Need to do basic initialisation of all rings first: */
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		ret = engine->init_hw(engine);
 		if (ret)
 			goto out;
@@ -4490,17 +4704,12 @@ i915_gem_cleanup_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		dev_priv->gt.cleanup_engine(engine);
 }
 
-static void
-init_engine_lists(struct intel_engine_cs *engine)
-{
-	INIT_LIST_HEAD(&engine->request_list);
-}
-
 void
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
@@ -4533,36 +4742,43 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 	i915_gem_detect_bit_6_swizzle(dev);
 }
 
-void
+int
 i915_gem_load_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int i;
+	int err = -ENOMEM;
 
-	dev_priv->objects =
-		kmem_cache_create("i915_gem_object",
-				  sizeof(struct drm_i915_gem_object), 0,
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
-	dev_priv->vmas =
-		kmem_cache_create("i915_gem_vma",
-				  sizeof(struct i915_vma), 0,
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
-	dev_priv->requests =
-		kmem_cache_create("i915_gem_request",
-				  sizeof(struct drm_i915_gem_request), 0,
-				  SLAB_HWCACHE_ALIGN |
-				  SLAB_RECLAIM_ACCOUNT |
-				  SLAB_DESTROY_BY_RCU,
-				  NULL);
+	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+	if (!dev_priv->objects)
+		goto err_out;
+
+	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+	if (!dev_priv->vmas)
+		goto err_objects;
+
+	dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+					SLAB_HWCACHE_ALIGN |
+					SLAB_RECLAIM_ACCOUNT |
+					SLAB_DESTROY_BY_RCU);
+	if (!dev_priv->requests)
+		goto err_vmas;
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	INIT_LIST_HEAD(&dev_priv->gt.timelines);
+	err = i915_gem_timeline_init(dev_priv,
+				     &dev_priv->gt.global_timeline,
+				     "[execution]");
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+	if (err)
+		goto err_requests;
 
 	INIT_LIST_HEAD(&dev_priv->context_list);
+	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+	init_llist_head(&dev_priv->mm.free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	for (i = 0; i < I915_NUM_ENGINES; i++)
-		init_engine_lists(&dev_priv->engine[i]);
+	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
 	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
 			  i915_gem_retire_work_handler);
 	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -4579,12 +4795,25 @@ i915_gem_load_init(struct drm_device *dev)
 	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
 
 	spin_lock_init(&dev_priv->fb_tracking.lock);
+
+	return 0;
+
+err_requests:
+	kmem_cache_destroy(dev_priv->requests);
+err_vmas:
+	kmem_cache_destroy(dev_priv->vmas);
+err_objects:
+	kmem_cache_destroy(dev_priv->objects);
+err_out:
+	return err;
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
+	WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+
 	kmem_cache_destroy(dev_priv->requests);
 	kmem_cache_destroy(dev_priv->vmas);
 	kmem_cache_destroy(dev_priv->objects);
@@ -4633,7 +4862,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
 
 	for (p = phases; *p; p++) {
-		list_for_each_entry(obj, *p, global_list) {
+		list_for_each_entry(obj, *p, global_link) {
 			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 		}
@@ -4725,21 +4954,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 	}
 }
 
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
-{
-	struct page *page;
-
-	/* Only default objects have per-page dirty tracking */
-	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
-		return NULL;
-
-	page = i915_gem_object_get_page(obj, n);
-	set_page_dirty(page);
-	return page;
-}
-
 /* Allocate a new GEM object and fill it with the supplied data */
 struct drm_i915_gem_object *
 i915_gem_object_create_from_data(struct drm_device *dev,
@@ -4758,14 +4972,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		goto fail;
 
-	i915_gem_object_pin_pages(obj);
-	sg = obj->pages;
+	sg = obj->mm.pages;
 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-	obj->dirty = 1;		/* Backing store is now out of date */
+	obj->mm.dirty = true; /* Backing store is now out of date */
 	i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(bytes != size)) {
@@ -4780,3 +4993,156 @@ i915_gem_object_create_from_data(struct drm_device *dev,
 	i915_gem_object_put(obj);
 	return ERR_PTR(ret);
 }
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+		       unsigned int n,
+		       unsigned int *offset)
+{
+	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+	struct scatterlist *sg;
+	unsigned int idx, count;
+
+	might_sleep();
+	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+	/* As we iterate forward through the sg, we record each entry in a
+	 * radixtree for quick repeated (backwards) lookups. If we have seen
+	 * this index previously, we will have an entry for it.
+	 *
+	 * Initial lookup is O(N), but this is amortized to O(1) for
+	 * sequential page access (where each new request is consecutive
+	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+	 * i.e. O(1) with a large constant!
+	 */
+	if (n < READ_ONCE(iter->sg_idx))
+		goto lookup;
+
+	mutex_lock(&iter->lock);
+
+	/* We prefer to reuse the last sg so that repeated lookup of this
+	 * (or the subsequent) sg are fast - comparing against the last
+	 * sg is faster than going through the radixtree.
+	 */
+
+	sg = iter->sg_pos;
+	idx = iter->sg_idx;
+	count = __sg_page_count(sg);
+
+	while (idx + count <= n) {
+		unsigned long exception, i;
+		int ret;
+
+		/* If we cannot allocate and insert this entry, or the
+		 * individual pages from this range, cancel updating the
+		 * sg_idx so that on this lookup we are forced to linearly
+		 * scan onwards, but on future lookups we will try the
+		 * insertion again (in which case we need to be careful of
+		 * the error return reporting that we have already inserted
+		 * this index).
+		 */
+		ret = radix_tree_insert(&iter->radix, idx, sg);
+		if (ret && ret != -EEXIST)
+			goto scan;
+
+		exception =
+			RADIX_TREE_EXCEPTIONAL_ENTRY |
+			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+		for (i = 1; i < count; i++) {
+			ret = radix_tree_insert(&iter->radix, idx + i,
+						(void *)exception);
+			if (ret && ret != -EEXIST)
+				goto scan;
+		}
+
+		idx += count;
+		sg = ____sg_next(sg);
+		count = __sg_page_count(sg);
+	}
+
+scan:
+	iter->sg_pos = sg;
+	iter->sg_idx = idx;
+
+	mutex_unlock(&iter->lock);
+
+	if (unlikely(n < idx)) /* insertion completed by another thread */
+		goto lookup;
+
+	/* In case we failed to insert the entry into the radixtree, we need
+	 * to look beyond the current sg.
+	 */
+	while (idx + count <= n) {
+		idx += count;
+		sg = ____sg_next(sg);
+		count = __sg_page_count(sg);
+	}
+
+	*offset = n - idx;
+	return sg;
+
+lookup:
+	rcu_read_lock();
+
+	sg = radix_tree_lookup(&iter->radix, n);
+	GEM_BUG_ON(!sg);
+
+	/* If this index is in the middle of multi-page sg entry,
+	 * the radixtree will contain an exceptional entry that points
+	 * to the start of that range. We will return the pointer to
+	 * the base page and the offset of this page within the
+	 * sg entry's range.
+	 */
+	*offset = 0;
+	if (unlikely(radix_tree_exception(sg))) {
+		unsigned long base =
+			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+		sg = radix_tree_lookup(&iter->radix, base);
+		GEM_BUG_ON(!sg);
+
+		*offset = n - base;
+	}
+
+	rcu_read_unlock();
+
+	return sg;
+}
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+	struct scatterlist *sg;
+	unsigned int offset;
+
+	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+	sg = i915_gem_object_get_sg(obj, n, &offset);
+	return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+			       unsigned int n)
+{
+	struct page *page;
+
+	page = i915_gem_object_get_page(obj, n);
+	if (!obj->mm.dirty)
+		set_page_dirty(page);
+
+	return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+				unsigned long n)
+{
+	struct scatterlist *sg;
+	unsigned int offset;
+
+	sg = i915_gem_object_get_sg(obj, n, &offset);
+	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
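
With the cached iterator, a forward walk over an object's pages is amortised O(1) per page, the case the comment above optimises for. A hedged sketch of such a walk (the caller must hold a pages pin, per the GEM_BUG_ON in i915_gem_object_get_sg()):

	/* Illustrative: sum the first word of every page of a pinned
	 * object. Sequential calls ride the cached sg position; a
	 * backwards jump would fall back to the radixtree lookup.
	 */
	static u32 checksum_first_words(struct drm_i915_gem_object *obj)
	{
		unsigned int n, npages = obj->base.size >> PAGE_SHIFT;
		u32 sum = 0;

		for (n = 0; n < npages; n++) {
			u32 *vaddr = kmap_atomic(i915_gem_object_get_page(obj, n));

			sum += *vaddr;
			kunmap_atomic(vaddr);
		}

		return sum;
	}
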
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 8292e79..51ec793 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,7 +28,9 @@
 #ifdef CONFIG_DRM_I915_DEBUG_GEM
 #define GEM_BUG_ON(expr) BUG_ON(expr)
 #else
-#define GEM_BUG_ON(expr)
+#define GEM_BUG_ON(expr) do { } while (0)
 #endif
 
+#define I915_NUM_ENGINES 5
+
 #endif /* __I915_GEM_H__ */
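
The widening from an empty expansion matters for release builds: 'if (cond) GEM_BUG_ON(x);' used to reduce to 'if (cond) ;', which trips GCC's -Wempty-body, whereas 'do { } while (0)' remains a single well-formed statement in any context a function call would be. Presumably the motivation here; a sketch of the warning case:

	/* Built with CONFIG_DRM_I915_DEBUG_GEM=n and the old empty
	 * expansion, the body below reduced to a bare ';' and GCC's
	 * -Wempty-body flagged it; the do-while form does not.
	 */
	static void check(int err)
	{
		if (err)
			GEM_BUG_ON(err != -EIO);
	}
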
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index ed98959..b3bc119e 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 		list_for_each_entry_safe(obj, next,
 					 &pool->cache_list[n],
 					 batch_pool_link)
-			i915_gem_object_put(obj);
+			__i915_gem_object_release_unless_active(obj);
 
 		INIT_LIST_HEAD(&pool->cache_list[n]);
 	}
@@ -97,9 +97,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 			size_t size)
 {
 	struct drm_i915_gem_object *obj = NULL;
-	struct drm_i915_gem_object *tmp, *next;
+	struct drm_i915_gem_object *tmp;
 	struct list_head *list;
-	int n;
+	int n, ret;
 
 	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
@@ -112,40 +112,35 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 		n = ARRAY_SIZE(pool->cache_list) - 1;
 	list = &pool->cache_list[n];
 
-	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+	list_for_each_entry(tmp, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
-		if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
-					     &tmp->base.dev->struct_mutex))
+		if (i915_gem_object_is_active(tmp))
 			break;
 
-		/* While we're looping, do some clean up */
-		if (tmp->madv == __I915_MADV_PURGED) {
-			list_del(&tmp->batch_pool_link);
-			i915_gem_object_put(tmp);
-			continue;
-		}
+		GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
+								 true));
 
 		if (tmp->base.size >= size) {
+			/* Clear the set of shared fences early */
+			ww_mutex_lock(&tmp->resv->lock, NULL);
+			reservation_object_add_excl_fence(tmp->resv, NULL);
+			ww_mutex_unlock(&tmp->resv->lock);
+
 			obj = tmp;
 			break;
 		}
 	}
 
 	if (obj == NULL) {
-		int ret;
-
-		obj = i915_gem_object_create(&pool->engine->i915->drm, size);
+		obj = i915_gem_object_create_internal(pool->engine->i915, size);
 		if (IS_ERR(obj))
 			return obj;
-
-		ret = i915_gem_object_get_pages(obj);
-		if (ret)
-			return ERR_PTR(ret);
-
-		obj->madv = I915_MADV_DONTNEED;
 	}
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		return ERR_PTR(ret);
+
 	list_move_tail(&obj->batch_pool_link, list);
-	i915_gem_object_pin_pages(obj);
 	return obj;
 }
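
Callers of the pool (the command parser's shadow batches are the main user) follow a borrow pattern: the object comes back pinned and with its fences cleared, and there is no explicit return; it simply becomes reusable once the request that referenced it retires, which is what keeps the list strictly LRU. A hedged sketch of such a caller:

	static int use_scratch_batch(struct intel_engine_cs *engine, size_t size)
	{
		struct drm_i915_gem_object *batch;

		batch = i915_gem_batch_pool_get(&engine->batch_pool, size);
		if (IS_ERR(batch))
			return PTR_ERR(batch);

		/* ... fill with commands, submit a request using it ... */

		/* Drop the pin taken by _get(); the object returns to
		 * the pool implicitly when its request retires.
		 */
		i915_gem_object_unpin_pages(batch);
		return 0;
	}
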
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index df10f4e9..6dd4757 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
 		if (ce->ring)
 			intel_ring_free(ce->ring);
 
-		i915_vma_put(ce->state);
+		__i915_gem_object_release_unless_active(ce->state->obj);
 	}
 
+	kfree(ctx->name);
 	put_pid(ctx->pid);
 	list_del(&ctx->link);
 
@@ -192,7 +193,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	 * This is only applicable for Ivy Bridge devices since
 	 * later platforms don't have L3 control bits in the PTE.
 	 */
-	if (IS_IVYBRIDGE(dev)) {
+	if (IS_IVYBRIDGE(to_i915(dev))) {
 		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
 		/* Failure shouldn't ever happen this early */
 		if (WARN_ON(ret)) {
@@ -303,19 +304,28 @@ __create_hw_context(struct drm_device *dev,
 	}
 
 	/* Default context will never have a file_priv */
-	if (file_priv != NULL) {
+	ret = DEFAULT_CONTEXT_HANDLE;
+	if (file_priv) {
 		ret = idr_alloc(&file_priv->context_idr, ctx,
 				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
 		if (ret < 0)
 			goto err_out;
-	} else
-		ret = DEFAULT_CONTEXT_HANDLE;
+	}
+	ctx->user_handle = ret;
 
 	ctx->file_priv = file_priv;
-	if (file_priv)
+	if (file_priv) {
 		ctx->pid = get_task_pid(current, PIDTYPE_PID);
+		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
+				      current->comm,
+				      pid_nr(ctx->pid),
+				      ctx->user_handle);
+		if (!ctx->name) {
+			ret = -ENOMEM;
+			goto err_pid;
+		}
+	}
 
-	ctx->user_handle = ret;
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
 	 * is no remap info, it will be a NOP. */
@@ -329,6 +339,9 @@ __create_hw_context(struct drm_device *dev,
 
 	return ctx;
 
+err_pid:
+	put_pid(ctx->pid);
+	idr_remove(&file_priv->context_idr, ctx->user_handle);
 err_out:
 	context_close(ctx);
 	return ERR_PTR(ret);
@@ -352,9 +365,9 @@ i915_gem_create_context(struct drm_device *dev,
 		return ctx;
 
 	if (USES_FULL_PPGTT(dev)) {
-		struct i915_hw_ppgtt *ppgtt =
-			i915_ppgtt_create(to_i915(dev), file_priv);
+		struct i915_hw_ppgtt *ppgtt;
 
+		ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
 		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
@@ -474,10 +487,11 @@ int i915_gem_context_init(struct drm_device *dev)
 void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		if (engine->last_context) {
 			i915_gem_context_unpin(engine->last_context, engine);
 			engine->last_context = NULL;
@@ -492,13 +506,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 			if (!i915_gem_context_is_default(ctx))
 				continue;
 
-			for_each_engine(engine, dev_priv)
+			for_each_engine(engine, dev_priv, id)
 				ctx->engine[engine->id].initialised = false;
 
 			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
 		}
 
-		for_each_engine(engine, dev_priv) {
+		for_each_engine(engine, dev_priv, id) {
 			struct intel_context *kce =
 				&dev_priv->kernel_context->engine[engine->id];
 
@@ -563,6 +577,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	enum intel_engine_id id;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -605,7 +620,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 			intel_ring_emit(ring,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, dev_priv) {
+			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
@@ -634,7 +649,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 			intel_ring_emit(ring,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, dev_priv) {
+			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
@@ -749,12 +764,36 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
 	return false;
 }
 
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+			    unsigned int flags)
+{
+	struct i915_vma *vma = ctx->engine[RCS].state;
+	int ret;
+
+	/* Clear this page out of any CPU caches for coherent swap-in/out.
+	 * We only want to do this on the first bind so that we do not stall
+	 * on an active context (which by nature is already on the GPU).
+	 */
+	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return vma;
+}
+
 static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
 	struct i915_gem_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
 	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
-	struct i915_vma *vma = to->engine[RCS].state;
+	struct i915_vma *vma;
 	struct i915_gem_context *from;
 	u32 hw_flags;
 	int ret, i;
@@ -762,17 +801,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	if (skip_rcs_switch(ppgtt, engine, to))
 		return 0;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. */
-	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
-		if (ret)
-			return ret;
-	}
-
 	/* Trying to pin first makes error handling easier. */
-	ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
-	if (ret)
-		return ret;
+	vma = i915_gem_context_pin_legacy(to, 0);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
 
 	/*
 	 * Pin can switch back to the default context if we end up calling into
@@ -929,21 +961,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	struct i915_gem_timeline *timeline;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv) {
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	for_each_engine(engine, dev_priv, id) {
 		struct drm_i915_gem_request *req;
 		int ret;
 
-		if (engine->last_context == NULL)
-			continue;
-
-		if (engine->last_context == dev_priv->kernel_context)
-			continue;
-
 		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
+		/* Queue this switch after all other activity */
+		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+			struct drm_i915_gem_request *prev;
+			struct intel_timeline *tl;
+
+			tl = &timeline->engine[engine->id];
+			prev = i915_gem_active_raw(&tl->last_request,
+						   &dev_priv->drm.struct_mutex);
+			if (prev)
+				i915_sw_fence_await_sw_fence_gfp(&req->submit,
+								 &prev->submit,
+								 GFP_KERNEL);
+		}
+
 		ret = i915_switch_context(req);
 		i915_add_request_no_flush(req);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 97c9d68..5e38299 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -44,51 +44,42 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	struct scatterlist *src, *dst;
 	int ret, i;
 
-	ret = i915_mutex_lock_interruptible(obj->base.dev);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		goto err;
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		goto err_unlock;
-
-	i915_gem_object_pin_pages(obj);
-
 	/* Copy sg so that we make an independent mapping */
 	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (st == NULL) {
 		ret = -ENOMEM;
-		goto err_unpin;
+		goto err_unpin_pages;
 	}
 
-	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
 	if (ret)
 		goto err_free;
 
-	src = obj->pages->sgl;
+	src = obj->mm.pages->sgl;
 	dst = st->sgl;
-	for (i = 0; i < obj->pages->nents; i++) {
+	for (i = 0; i < obj->mm.pages->nents; i++) {
 		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
 	}
 
 	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-		ret =-ENOMEM;
+		ret = -ENOMEM;
 		goto err_free_sg;
 	}
 
-	mutex_unlock(&obj->base.dev->struct_mutex);
 	return st;
 
 err_free_sg:
 	sg_free_table(st);
 err_free:
 	kfree(st);
-err_unpin:
+err_unpin_pages:
 	i915_gem_object_unpin_pages(obj);
-err_unlock:
-	mutex_unlock(&obj->base.dev->struct_mutex);
 err:
 	return ERR_PTR(ret);
 }
@@ -103,36 +94,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 	sg_free_table(sg);
 	kfree(sg);
 
-	mutex_lock(&obj->base.dev->struct_mutex);
 	i915_gem_object_unpin_pages(obj);
-	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-	struct drm_device *dev = obj->base.dev;
-	void *addr;
-	int ret;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ERR_PTR(ret);
-
-	addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	mutex_unlock(&dev->struct_mutex);
-
-	return addr;
+	return i915_gem_object_pin_map(obj, I915_MAP_WB);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
-	struct drm_device *dev = obj->base.dev;
 
-	mutex_lock(&dev->struct_mutex);
 	i915_gem_object_unpin_map(obj);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@@ -179,32 +155,45 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	int ret;
 	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+	int err;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		return err;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, write);
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
+		goto out;
+
+	err = i915_gem_object_set_to_cpu_domain(obj, write);
 	mutex_unlock(&dev->struct_mutex);
-	return ret;
+
+out:
+	i915_gem_object_unpin_pages(obj);
+	return err;
 }
 
 static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	int ret;
+	int err;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		return err;
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
+		goto out;
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
 	mutex_unlock(&dev->struct_mutex);
 
-	return ret;
+out:
+	i915_gem_object_unpin_pages(obj);
+	return err;
 }
 
 static const struct dma_buf_ops i915_dmabuf_ops =  {
@@ -222,60 +211,17 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
-static void export_fences(struct drm_i915_gem_object *obj,
-			  struct dma_buf *dma_buf)
-{
-	struct reservation_object *resv = dma_buf->resv;
-	struct drm_i915_gem_request *req;
-	unsigned long active;
-	int idx;
-
-	active = __I915_BO_ACTIVE(obj);
-	if (!active)
-		return;
-
-	/* Serialise with execbuf to prevent concurrent fence-loops */
-	mutex_lock(&obj->base.dev->struct_mutex);
-
-	/* Mark the object for future fences before racily adding old fences */
-	obj->base.dma_buf = dma_buf;
-
-	ww_mutex_lock(&resv->lock, NULL);
-
-	for_each_active(active, idx) {
-		req = i915_gem_active_get(&obj->last_read[idx],
-					  &obj->base.dev->struct_mutex);
-		if (!req)
-			continue;
-
-		if (reservation_object_reserve_shared(resv) == 0)
-			reservation_object_add_shared_fence(resv, &req->fence);
-
-		i915_gem_request_put(req);
-	}
-
-	req = i915_gem_active_get(&obj->last_write,
-				  &obj->base.dev->struct_mutex);
-	if (req) {
-		reservation_object_add_excl_fence(resv, &req->fence);
-		i915_gem_request_put(req);
-	}
-
-	ww_mutex_unlock(&resv->lock);
-	mutex_unlock(&obj->base.dev->struct_mutex);
-}
-
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-	struct dma_buf *dma_buf;
 
 	exp_info.ops = &i915_dmabuf_ops;
 	exp_info.size = gem_obj->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem_obj;
+	exp_info.resv = obj->resv;
 
 	if (obj->ops->dmabuf_export) {
 		int ret = obj->ops->dmabuf_export(obj);
@@ -283,30 +229,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 			return ERR_PTR(ret);
 	}
 
-	dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
-	if (IS_ERR(dma_buf))
-		return dma_buf;
-
-	export_fences(obj, dma_buf);
-	return dma_buf;
+	return drm_gem_dmabuf_export(dev, &exp_info);
 }
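
Passing exp_info.resv is what makes the removed export_fences() unnecessary: the exported dma-buf now shares the object's reservation object outright, so fences attached by execbuf become visible to importers, and to poll() on the dma-buf fd, the moment they are added, with no snapshot to go stale. As a hedged userspace illustration (dma-buf poll semantics of this era: POLLOUT waits for all fences, POLLIN only for the exclusive one):

	#include <poll.h>

	/* Block until the buffer behind an exported dma-buf fd is fully
	 * idle, i.e. every shared and exclusive fence has signalled.
	 */
	static int wait_for_idle(int dmabuf_fd, int timeout_ms)
	{
		struct pollfd pfd = {
			.fd = dmabuf_fd,
			.events = POLLOUT,
		};

		return poll(&pfd, 1, timeout_ms);
	}
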
 
-static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-	struct sg_table *sg;
-
-	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg))
-		return PTR_ERR(sg);
-
-	obj->pages = sg;
-	return 0;
+	return dma_buf_map_attachment(obj->base.import_attach,
+				      DMA_BIDIRECTIONAL);
 }
 
-static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+					     struct sg_table *pages)
 {
-	dma_buf_unmap_attachment(obj->base.import_attach,
-				 obj->pages, DMA_BIDIRECTIONAL);
+	dma_buf_unmap_attachment(obj->base.import_attach, pages,
+				 DMA_BIDIRECTIONAL);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
@@ -350,6 +287,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
 	obj->base.import_attach = attach;
+	obj->resv = dma_buf->resv;
 
 	/* We use GTT as shorthand for a coherent domain, one that is
 	 * neither in the GPU cache nor in the CPU cache, where all
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5b6f81c..bd08814 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,13 +33,17 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool
-gpu_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv) {
-		if (intel_engine_is_active(engine))
+	for_each_engine(engine, dev_priv, id) {
+		struct intel_timeline *tl;
+
+		tl = &ggtt->base.timeline.engine[engine->id];
+		if (i915_gem_active_isset(&tl->last_request))
 			return false;
 	}
 
@@ -55,7 +59,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
 	if (WARN_ON(!list_empty(&vma->exec_list)))
 		return false;
 
-	if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
 		return false;
 
 	list_add(&vma->exec_list, unwind);
@@ -102,6 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	struct i915_vma *vma, *next;
 	int ret;
 
+	lockdep_assert_held(&vm->dev->struct_mutex);
 	trace_i915_gem_evict(vm, min_size, alignment, flags);
 
 	/*
@@ -152,7 +157,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
-	if (gpu_is_idle(dev_priv)) {
+	if (ggtt_is_idle(dev_priv)) {
 		/* If we still have pending pageflip completions, drop
 		 * back to userspace to give our workqueues time to
 		 * acquire our locks and unpin the old scanouts.
@@ -212,6 +217,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
 {
 	struct drm_mm_node *node, *next;
 
+	lockdep_assert_held(&target->vm->dev->struct_mutex);
+
 	list_for_each_entry_safe(node, next,
 			&target->vm->mm.head_node.node_list,
 			node_list) {
@@ -265,7 +272,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 	struct i915_vma *vma, *next;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
+	lockdep_assert_held(&vm->dev->struct_mutex);
 	trace_i915_gem_evict_vm(vm);
 
 	if (do_idle) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7adb4c7..fb5b443 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,7 +34,6 @@
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
@@ -332,7 +331,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->page = -1;
 	cache->vaddr = 0;
 	cache->i915 = i915;
-	cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+	/* Must be a variable in the struct to allow GCC to unroll. */
+	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
 	cache->node.allocated = false;
 }
 
@@ -370,8 +370,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
 
 			ggtt->base.clear_range(&ggtt->base,
 					       cache->node.start,
-					       cache->node.size,
-					       true);
+					       cache->node.size);
 			drm_mm_remove_node(&cache->node);
 		} else {
 			i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -419,17 +418,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 	unsigned long offset;
 	void *vaddr;
 
-	if (cache->node.allocated) {
-		wmb();
-		ggtt->base.insert_page(&ggtt->base,
-				       i915_gem_object_get_dma_address(obj, page),
-				       cache->node.start, I915_CACHE_NONE, 0);
-		cache->page = page;
-		return unmask_page(cache->vaddr);
-	}
-
 	if (cache->vaddr) {
-		io_mapping_unmap_atomic(unmask_page(cache->vaddr));
+		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
 	} else {
 		struct i915_vma *vma;
 		int ret;
@@ -467,6 +457,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
 	offset = cache->node.start;
 	if (cache->node.allocated) {
+		wmb();
 		ggtt->base.insert_page(&ggtt->base,
 				       i915_gem_object_get_dma_address(obj, page),
 				       offset, I915_CACHE_NONE, 0);
@@ -474,7 +465,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		offset += page << PAGE_SHIFT;
 	}
 
-	vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+	vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
 
@@ -552,27 +543,13 @@ relocate_entry(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-static bool object_is_idle(struct drm_i915_gem_object *obj)
-{
-	unsigned long active = i915_gem_object_get_active(obj);
-	int idx;
-
-	for_each_active(active, idx) {
-		if (!i915_gem_active_is_idle(&obj->last_read[idx],
-					     &obj->base.dev->struct_mutex))
-			return false;
-	}
-
-	return true;
-}
-
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct reloc_cache *cache)
 {
-	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
 	struct i915_vma *target_vma;
@@ -591,7 +568,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
 	 * through the ppgtt for non_secure batchbuffers. */
-	if (unlikely(IS_GEN6(dev) &&
+	if (unlikely(IS_GEN6(dev_priv) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
 		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
 				    PIN_GLOBAL);
@@ -649,10 +626,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return -EINVAL;
 	}
 
-	/* We can't wait for rendering with pagefaults disabled */
-	if (pagefault_disabled() && !object_is_idle(obj))
-		return -EFAULT;
-
 	ret = relocate_entry(obj, reloc, cache, target_offset);
 	if (ret)
 		return ret;
@@ -679,12 +652,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 	remain = entry->relocation_count;
 	while (remain) {
 		struct drm_i915_gem_relocation_entry *r = stack_reloc;
-		int count = remain;
-		if (count > ARRAY_SIZE(stack_reloc))
-			count = ARRAY_SIZE(stack_reloc);
+		unsigned long unwritten;
+		unsigned int count;
+
+		count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
 		remain -= count;
 
-		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
+		/* This is the fast path and we cannot handle a pagefault
+		 * whilst holding the struct mutex lest the user pass in the
+		 * relocations contained within a mmapped bo. In such a case
+		 * the page fault handler would call i915_gem_fault() and
+		 * we would try to acquire the struct mutex again. Obviously
+		 * this is bad and so lockdep complains vehemently.
+		 */
+		pagefault_disable();
+		unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
+		pagefault_enable();
+		if (unlikely(unwritten)) {
 			ret = -EFAULT;
 			goto out;
 		}
@@ -696,11 +680,26 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 			if (ret)
 				goto out;
 
-			if (r->presumed_offset != offset &&
-			    __put_user(r->presumed_offset,
-				       &user_relocs->presumed_offset)) {
-				ret = -EFAULT;
-				goto out;
+			if (r->presumed_offset != offset) {
+				pagefault_disable();
+				unwritten = __put_user(r->presumed_offset,
+						       &user_relocs->presumed_offset);
+				pagefault_enable();
+				if (unlikely(unwritten)) {
+					/* Note that reporting an error now
+					 * leaves everything in an inconsistent
+					 * state as we have *already* changed
+					 * the relocation value inside the
+					 * object. As we have not changed
+					 * reloc.presumed_offset and will not
+					 * change the execobject.offset, on a
+					 * subsequent call we will not rewrite
+					 * the value inside the object, leaving
+					 * it dangling and causing a GPU hang.
+					 */
+					ret = -EFAULT;
+					goto out;
+				}
 			}
 
 			user_relocs++;
@@ -740,20 +739,11 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 	struct i915_vma *vma;
 	int ret = 0;
 
-	/* This is the fast path and we cannot handle a pagefault whilst
-	 * holding the struct mutex lest the user pass in the relocations
-	 * contained within a mmaped bo. For in such a case we, the page
-	 * fault handler would call i915_gem_fault() and we would try to
-	 * acquire the struct mutex again. Obviously this is bad and so
-	 * lockdep complains vehemently.
-	 */
-	pagefault_disable();
 	list_for_each_entry(vma, &eb->vmas, exec_list) {
 		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 		if (ret)
 			break;
 	}
-	pagefault_enable();
 
 	return ret;
 }
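
Pushing pagefault_disable() down to wrap only the user copies keeps the invariant intact (no faulting while struct_mutex is held, for the lockdep reason the comment spells out) without disabling faults across the whole relocation walk. The no-fault copy is the standard fast-path idiom; when it fails, the caller unwinds to i915_gem_execbuffer_relocate_slow(), which copies the relocations with the lock dropped. The generic shape:

	#include <linux/uaccess.h>

	/* Fast path: copy from userspace without permitting page faults;
	 * a fault makes __copy_from_user_inatomic() return non-zero and
	 * the caller falls back to a faulting copy outside the lock.
	 */
	static int read_user_nofault(void *dst, const void __user *src,
				     size_t len)
	{
		unsigned long unwritten;

		pagefault_disable();
		unwritten = __copy_from_user_inatomic(dst, src, len);
		pagefault_enable();

		return unwritten ? -EFAULT : 0;
	}
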
@@ -1111,44 +1101,20 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	return ret;
 }
 
-static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
-{
-	unsigned int mask;
-
-	mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
-	mask <<= I915_BO_ACTIVE_SHIFT;
-
-	return mask;
-}
-
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned int other_rings = eb_other_engines(req);
 	struct i915_vma *vma;
 	int ret;
 
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
-		struct reservation_object *resv;
 
-		if (obj->flags & other_rings) {
-			ret = i915_gem_request_await_object
-				(req, obj, obj->base.pending_write_domain);
-			if (ret)
-				return ret;
-		}
-
-		resv = i915_gem_object_get_dmabuf_resv(obj);
-		if (resv) {
-			ret = i915_sw_fence_await_reservation
-				(&req->submit, resv, &i915_fence_ops,
-				 obj->base.pending_write_domain, 10*HZ,
-				 GFP_KERNEL | __GFP_NOWARN);
-			if (ret < 0)
-				return ret;
-		}
+		ret = i915_gem_request_await_object
+			(req, obj, obj->base.pending_write_domain);
+		if (ret)
+			return ret;
 
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj, false);
@@ -1281,6 +1247,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	return ctx;
 }
 
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+	return !(obj->cache_level == I915_CACHE_NONE ||
+		 obj->cache_level == I915_CACHE_WT);
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req,
 			     unsigned int flags)
@@ -1290,8 +1262,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	obj->dirty = 1; /* be paranoid  */
-
 	/* Add a reference if we're newly entering the active list.
 	 * The order in which we add operations to the retirement queue is
 	 * vital here: mark_active adds to the start of the callback list,
@@ -1299,37 +1269,32 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (!i915_gem_object_is_active(obj))
-		i915_gem_object_get(obj);
-	i915_gem_object_set_active(obj, idx);
-	i915_gem_active_set(&obj->last_read[idx], req);
+	if (!i915_vma_is_active(vma))
+		obj->active_count++;
+	i915_vma_set_active(vma, idx);
+	i915_gem_active_set(&vma->last_read[idx], req);
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 
 	if (flags & EXEC_OBJECT_WRITE) {
-		i915_gem_active_set(&obj->last_write, req);
+		i915_gem_active_set(&vma->last_write, req);
 
 		intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
 		/* update for the implicit flush after a batch */
 		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+		if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
+			obj->cache_dirty = true;
 	}
 
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		i915_gem_active_set(&vma->last_fence, req);
-
-	i915_vma_set_active(vma, idx);
-	i915_gem_active_set(&vma->last_read[idx], req);
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
 static void eb_export_fence(struct drm_i915_gem_object *obj,
 			    struct drm_i915_gem_request *req,
 			    unsigned int flags)
 {
-	struct reservation_object *resv;
-
-	resv = i915_gem_object_get_dmabuf_resv(obj);
-	if (!resv)
-		return;
+	struct reservation_object *resv = obj->resv;
 
 	/* Ignore errors from failing to allocate the new fence, we can't
 	 * handle an error right now. Worst case should be missed
@@ -1599,12 +1564,12 @@ eb_select_engine(struct drm_i915_private *dev_priv,
 			return NULL;
 		}
 
-		engine = &dev_priv->engine[_VCS(bsd_idx)];
+		engine = dev_priv->engine[_VCS(bsd_idx)];
 	} else {
-		engine = &dev_priv->engine[user_ring_map[user_ring_id]];
+		engine = dev_priv->engine[user_ring_map[user_ring_id]];
 	}
 
-	if (!intel_engine_initialized(engine)) {
+	if (!engine) {
 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
 		return NULL;
 	}
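
This hunk follows dev_priv->engine[] becoming an array of pointers in which
engines absent on the current platform are simply NULL, so a plain pointer test
replaces intel_engine_initialized(). A hedged sketch of the resulting idiom
(the error handling is illustrative):

	struct intel_engine_cs *engine;

	engine = dev_priv->engine[VCS];	/* may be NULL on this platform */
	if (!engine)
		return -ENODEV;
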
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 8df1fa7..cd59dbc 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -290,6 +290,8 @@ i915_vma_put_fence(struct i915_vma *vma)
 {
 	struct drm_i915_fence_reg *fence = vma->fence;
 
+	assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
 	if (!fence)
 		return 0;
 
@@ -341,6 +343,11 @@ i915_vma_get_fence(struct i915_vma *vma)
 	struct drm_i915_fence_reg *fence;
 	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
 
+	/* Note that we revoke fences on runtime suspend. Therefore the user
+	 * must keep the device awake whilst using the fence.
+	 */
+	assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
 	/* Just update our place in the LRU if our fence is getting reused. */
 	if (vma->fence) {
 		fence = vma->fence;
@@ -364,7 +371,8 @@ i915_vma_get_fence(struct i915_vma *vma)
  * @dev: DRM device
  *
  * Restore the hw fence state to match the software tracking again, to be called
- * after a gpu reset and on resume.
+ * after a gpu reset and on resume. Note that on runtime suspend we only cancel
+ * the fences, to be reacquired by the user later.
  */
 void i915_gem_restore_fences(struct drm_device *dev)
 {
@@ -379,10 +387,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
 		 * Commit delayed tiling changes if we have an object still
 		 * attached to the fence, otherwise just clear the fence.
 		 */
-		if (vma && !i915_gem_object_is_tiled(vma->obj))
-			vma = NULL;
+		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
+			GEM_BUG_ON(!reg->dirty);
+			GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
 
-		fence_update(reg, vma);
+			list_move(&reg->link, &dev_priv->mm.fence_list);
+			vma->fence = NULL;
+			vma = NULL;
+		}
+
+		fence_write(reg, vma);
+		reg->vma = vma;
 	}
 }
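
Because fences are revoked on runtime suspend, any code relying on a fence must
keep the device awake for the duration; the new assert_rpm_wakelock_held()
calls turn a missed wakeref into an immediate warning rather than a subtle
hang. The expected calling pattern, sketched from the user's side:

	intel_runtime_pm_get(dev_priv);
	ret = i915_vma_get_fence(vma);
	if (ret == 0) {
		/* ... access through the fenced GTT mapping ... */
	}
	intel_runtime_pm_put(dev_priv);
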
 
@@ -448,7 +463,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
-	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
 		/*
 		 * On BDW+, swizzling is not used. We leave the CPU memory
 		 * controller in charge of optimizing memory accesses without
@@ -487,19 +502,20 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
 			}
 		}
-	} else if (IS_GEN5(dev)) {
+	} else if (IS_GEN5(dev_priv)) {
 		/* On Ironlake, whatever the DRAM config, the GPU
 		 * always does the same swizzling setup.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (IS_GEN2(dev)) {
+	} else if (IS_GEN2(dev_priv)) {
 		/* As far as we know, the 865 doesn't have these bit 6
 		 * swizzling issues.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+	} else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
+		   !IS_G33(dev_priv))) {
 		uint32_t dcc;
 
 		/* On 9xx chipsets, channel interleave by the CPU is
@@ -537,7 +553,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		}
 
 		/* check for L-shaped memory aka modified enhanced addressing */
-		if (IS_GEN4(dev) &&
+		if (IS_GEN4(dev_priv) &&
 		    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
 			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -628,6 +644,7 @@ i915_gem_swizzle_page(struct page *page)
 /**
  * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
  * @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
  *
  * This function fixes up the swizzling in case any page frame number for this
  * object has changed in bit 17 since that state has been saved with
@@ -638,7 +655,8 @@ i915_gem_swizzle_page(struct page *page)
  * by swapping them out and back in again).
  */
 void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+				  struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
@@ -648,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		return;
 
 	i = 0;
-	for_each_sgt_page(page, sgt_iter, obj->pages) {
+	for_each_sgt_page(page, sgt_iter, pages) {
 		char new_bit_17 = page_to_phys(page) >> 17;
-		if ((new_bit_17 & 0x1) !=
-		    (test_bit(i, obj->bit_17) != 0)) {
+		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
 			i915_gem_swizzle_page(page);
 			set_page_dirty(page);
 		}
@@ -662,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 /**
  * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
  * @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
  *
  * This function saves the bit 17 of each page frame number so that swizzling
  * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
  * be called before the backing storage can be unpinned.
  */
 void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+				    struct sg_table *pages)
 {
+	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
 	struct sgt_iter sgt_iter;
 	struct page *page;
-	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (obj->bit_17 == NULL) {
@@ -687,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
 	i = 0;
 
-	for_each_sgt_page(page, sgt_iter, obj->pages) {
+	for_each_sgt_page(page, sgt_iter, pages) {
 		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
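
Together the two helpers implement a save/compare scheme for the bit-17
swizzle issue: on unpin, bit 17 of each page's physical address is recorded in
the obj->bit_17 bitmap; on re-pin, any page whose physical bit 17 has since
flipped (for instance because swap handed back a different page frame) has its
bytes swizzled back. Reduced to its essence:

	/* save: remember bit 17 of each page's physical address */
	if (page_to_phys(page) & (1 << 17))
		__set_bit(i, obj->bit_17);

	/* restore: re-swizzle any page whose bit 17 has changed */
	if (((page_to_phys(page) >> 17) & 1) != (test_bit(i, obj->bit_17) != 0))
		i915_gem_swizzle_page(page);
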
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0bb4232..a5fafa3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,7 @@
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 
 #define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
 
@@ -175,7 +176,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 {
 	u32 pte_flags = 0;
 
-	vma->pages = vma->obj->pages;
+	vma->pages = vma->obj->mm.pages;
 
 	/* Currently applicable only to VLV */
 	if (vma->obj->gt_ro)
@@ -191,15 +192,13 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
 	vma->vm->clear_range(vma->vm,
 			     vma->node.start,
-			     vma->size,
-			     true);
+			     vma->size);
 }
 
 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level,
-				  bool valid)
+				  enum i915_cache_level level)
 {
-	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
 	pte |= addr;
 
 	switch (level) {
@@ -234,9 +233,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
 
 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
 				 enum i915_cache_level level,
-				 bool valid, u32 unused)
+				 u32 unused)
 {
-	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
 	switch (level) {
@@ -256,9 +255,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
 
 static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
 				 enum i915_cache_level level,
-				 bool valid, u32 unused)
+				 u32 unused)
 {
-	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
 	switch (level) {
@@ -280,9 +279,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
 
 static gen6_pte_t byt_pte_encode(dma_addr_t addr,
 				 enum i915_cache_level level,
-				 bool valid, u32 flags)
+				 u32 flags)
 {
-	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
 	if (!(flags & PTE_READ_ONLY))
@@ -296,9 +295,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
 
 static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
 				 enum i915_cache_level level,
-				 bool valid, u32 unused)
+				 u32 unused)
 {
-	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
 
 	if (level != I915_CACHE_NONE)
@@ -309,9 +308,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
 
 static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 				  enum i915_cache_level level,
-				  bool valid, u32 unused)
+				  u32 unused)
 {
-	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
 
 	switch (level) {
@@ -373,27 +372,29 @@ static void *kmap_page_dma(struct i915_page_dma *p)
 /* We use the flushing unmap only with ppgtt structures:
  * page directories, page tables and scratch pages.
  */
-static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
 {
 	/* There are only a few exceptions for gen >= 6: chv and bxt.
 	 * And we are not sure about the latter, so play safe for now.
 	 */
-	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
 		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 
 	kunmap_atomic(vaddr);
 }
 
 #define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+#define kunmap_px(ppgtt, vaddr) \
+		kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
 
 #define setup_px(dev, px) setup_page_dma((dev), px_base(px))
 #define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
-#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
-#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
+#define fill32_px(dev_priv, px, v) \
+		fill_page_dma_32((dev_priv), px_base(px), (v))
 
-static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
-			  const uint64_t val)
+static void fill_page_dma(struct drm_i915_private *dev_priv,
+			  struct i915_page_dma *p, const uint64_t val)
 {
 	int i;
 	uint64_t * const vaddr = kmap_page_dma(p);
@@ -401,17 +402,17 @@ static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
 	for (i = 0; i < 512; i++)
 		vaddr[i] = val;
 
-	kunmap_page_dma(dev, vaddr);
+	kunmap_page_dma(dev_priv, vaddr);
 }
 
-static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
-			     const uint32_t val32)
+static void fill_page_dma_32(struct drm_i915_private *dev_priv,
+			     struct i915_page_dma *p, const uint32_t val32)
 {
 	uint64_t v = val32;
 
 	v = v << 32 | val32;
 
-	fill_page_dma(dev, p, v);
+	fill_page_dma(dev_priv, p, v);
 }
 
 static int
@@ -472,9 +473,9 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
 	gen8_pte_t scratch_pte;
 
 	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
-				      I915_CACHE_LLC, true);
+				      I915_CACHE_LLC);
 
-	fill_px(vm->dev, pt, scratch_pte);
+	fill_px(to_i915(vm->dev), pt, scratch_pte);
 }
 
 static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -485,9 +486,9 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
 	WARN_ON(vm->scratch_page.daddr == 0);
 
 	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-				     I915_CACHE_LLC, true, 0);
+				     I915_CACHE_LLC, 0);
 
-	fill32_px(vm->dev, pt, scratch_pte);
+	fill32_px(to_i915(vm->dev), pt, scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct drm_device *dev)
@@ -534,7 +535,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
 
 	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
 
-	fill_px(vm->dev, pd, scratch_pde);
+	fill_px(to_i915(vm->dev), pd, scratch_pde);
 }
 
 static int __pdp_init(struct drm_device *dev,
@@ -615,7 +616,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
 
 	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
 
-	fill_px(vm->dev, pdp, scratch_pdpe);
+	fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
 }
 
 static void gen8_initialize_pml4(struct i915_address_space *vm,
@@ -626,7 +627,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
 	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
 					  I915_CACHE_LLC);
 
-	fill_px(vm->dev, pml4, scratch_pml4e);
+	fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
 }
 
 static void
@@ -706,83 +707,169 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
 }
 
-static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
-				       struct i915_page_directory_pointer *pdp,
-				       uint64_t start,
-				       uint64_t length,
-				       gen8_pte_t scratch_pte)
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
+/* Removes entries from a single page table, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries.
+ */
+static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+				struct i915_page_table *pt,
+				uint64_t start,
+				uint64_t length)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+	unsigned int num_entries = gen8_pte_count(start, length);
+	unsigned int pte = gen8_pte_index(start);
+	unsigned int pte_end = pte + num_entries;
 	gen8_pte_t *pt_vaddr;
-	unsigned pdpe = gen8_pdpe_index(start);
-	unsigned pde = gen8_pde_index(start);
-	unsigned pte = gen8_pte_index(start);
-	unsigned num_entries = length >> PAGE_SHIFT;
-	unsigned last_pte, i;
+	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
+						 I915_CACHE_LLC);
 
-	if (WARN_ON(!pdp))
-		return;
+	if (WARN_ON(!px_page(pt)))
+		return false;
 
-	while (num_entries) {
-		struct i915_page_directory *pd;
-		struct i915_page_table *pt;
+	GEM_BUG_ON(pte_end > GEN8_PTES);
 
-		if (WARN_ON(!pdp->page_directory[pdpe]))
-			break;
+	bitmap_clear(pt->used_ptes, pte, num_entries);
 
-		pd = pdp->page_directory[pdpe];
+	if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
+		free_pt(vm->dev, pt);
+		return true;
+	}
 
+	pt_vaddr = kmap_px(pt);
+
+	while (pte < pte_end)
+		pt_vaddr[pte++] = scratch_pte;
+
+	kunmap_px(ppgtt, pt_vaddr);
+
+	return false;
+}
+
+/* Removes entries from a single page dir, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries
+ */
+static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
+				struct i915_page_directory *pd,
+				uint64_t start,
+				uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+	struct i915_page_table *pt;
+	uint64_t pde;
+	gen8_pde_t *pde_vaddr;
+	gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
+						 I915_CACHE_LLC);
+
+	gen8_for_each_pde(pt, pd, start, length, pde) {
 		if (WARN_ON(!pd->page_table[pde]))
 			break;
 
-		pt = pd->page_table[pde];
+		if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
+			__clear_bit(pde, pd->used_pdes);
+			pde_vaddr = kmap_px(pd);
+			pde_vaddr[pde] = scratch_pde;
+			kunmap_px(ppgtt, pde_vaddr);
+		}
+	}
 
-		if (WARN_ON(!px_page(pt)))
+	if (bitmap_empty(pd->used_pdes, I915_PDES)) {
+		free_pd(vm->dev, pd);
+		return true;
+	}
+
+	return false;
+}
+
+/* Removes entries from a single page dir pointer, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries
+ */
+static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
+				 struct i915_page_directory_pointer *pdp,
+				 uint64_t start,
+				 uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+	struct i915_page_directory *pd;
+	uint64_t pdpe;
+	gen8_ppgtt_pdpe_t *pdpe_vaddr;
+	gen8_ppgtt_pdpe_t scratch_pdpe =
+		gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
+
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+		if (WARN_ON(!pdp->page_directory[pdpe]))
 			break;
 
-		last_pte = pte + num_entries;
-		if (last_pte > GEN8_PTES)
-			last_pte = GEN8_PTES;
-
-		pt_vaddr = kmap_px(pt);
-
-		for (i = pte; i < last_pte; i++) {
-			pt_vaddr[i] = scratch_pte;
-			num_entries--;
+		if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
+			__clear_bit(pdpe, pdp->used_pdpes);
+			if (USES_FULL_48BIT_PPGTT(vm->dev)) {
+				pdpe_vaddr = kmap_px(pdp);
+				pdpe_vaddr[pdpe] = scratch_pdpe;
+				kunmap_px(ppgtt, pdpe_vaddr);
+			}
 		}
+	}
 
-		kunmap_px(ppgtt, pt_vaddr);
+	mark_tlbs_dirty(ppgtt);
 
-		pte = 0;
-		if (++pde == I915_PDES) {
-			if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
-				break;
-			pde = 0;
+	if (USES_FULL_48BIT_PPGTT(vm->dev) &&
+	    bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev))) {
+		free_pdp(vm->dev, pdp);
+		return true;
+	}
+
+	return false;
+}
+
+/* Removes entries from a single pml4.
+ * This is the top-level structure in 4-level page tables used on gen8+.
+ * Empty entries are always scratch pml4e.
+ */
+static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
+				  struct i915_pml4 *pml4,
+				  uint64_t start,
+				  uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+	struct i915_page_directory_pointer *pdp;
+	uint64_t pml4e;
+	gen8_ppgtt_pml4e_t *pml4e_vaddr;
+	gen8_ppgtt_pml4e_t scratch_pml4e =
+		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
+
+	GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->dev));
+
+	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
+		if (WARN_ON(!pml4->pdps[pml4e]))
+			break;
+
+		if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
+			__clear_bit(pml4e, pml4->used_pml4es);
+			pml4e_vaddr = kmap_px(pml4);
+			pml4e_vaddr[pml4e] = scratch_pml4e;
+			kunmap_px(ppgtt, pml4e_vaddr);
 		}
 	}
 }
 
 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
-				   uint64_t start,
-				   uint64_t length,
-				   bool use_scratch)
+				   uint64_t start, uint64_t length)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
-						 I915_CACHE_LLC, use_scratch);
 
-	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
-		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
-					   scratch_pte);
-	} else {
-		uint64_t pml4e;
-		struct i915_page_directory_pointer *pdp;
-
-		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
-			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
-						   scratch_pte);
-		}
-	}
+	if (USES_FULL_48BIT_PPGTT(vm->dev))
+		gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
+	else
+		gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
 }
 
 static void
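
The rewritten teardown is one recursion scheme applied at every paging level:
clear your slice of entries, let the level below report (via its bool return)
that it freed itself, and if so point your own entry back at scratch; finally
report whether you in turn became empty. In pseudo-C, with for_each_child(),
write_scratch_entry() and friends standing in for the per-level macros and
helpers:

	static bool clear_level(vm, parent, start, length)
	{
		for_each_child(child, parent, start, length, idx) {
			if (clear_level_below(vm, child, start, length)) {
				__clear_bit(idx, parent->used);
				write_scratch_entry(parent, idx);
			}
		}
		if (bitmap_empty(parent->used, N_ENTRIES)) {
			free_level(vm->dev, parent);
			return true;	/* caller unhooks us as well */
		}
		return false;
	}
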
@@ -809,7 +896,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
 
 		pt_vaddr[pte] =
 			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
-					cache_level, true);
+					cache_level);
 		if (++pte == GEN8_PTES) {
 			kunmap_px(ppgtt, pt_vaddr);
 			pt_vaddr = NULL;
@@ -1208,16 +1295,6 @@ int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
 	return -ENOMEM;
 }
 
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
-	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
 static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 				    struct i915_page_directory_pointer *pdp,
 				    uint64_t start,
@@ -1452,7 +1529,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	uint64_t start = ppgtt->base.start;
 	uint64_t length = ppgtt->base.total;
 	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
-						 I915_CACHE_LLC, true);
+						 I915_CACHE_LLC);
 
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
 		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
@@ -1569,7 +1646,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
 	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-				     I915_CACHE_LLC, true, 0);
+				     I915_CACHE_LLC, 0);
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
 		u32 expected;
@@ -1728,8 +1805,9 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
 		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
@@ -1741,12 +1819,13 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	uint32_t ecochk, ecobits;
+	enum intel_engine_id id;
 
 	ecobits = I915_READ(GAC_ECO_BITS);
 	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
 
 	ecochk = I915_READ(GAM_ECOCHK);
-	if (IS_HASWELL(dev)) {
+	if (IS_HASWELL(dev_priv)) {
 		ecochk |= ECOCHK_PPGTT_WB_HSW;
 	} else {
 		ecochk |= ECOCHK_PPGTT_LLC_IVB;
@@ -1754,7 +1833,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 	}
 	I915_WRITE(GAM_ECOCHK, ecochk);
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		/* GFX_MODE is per-ring on gen7+ */
 		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -1782,8 +1861,7 @@ static void gen6_ppgtt_enable(struct drm_device *dev)
 /* PPGTT support for Sandybridge/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 				   uint64_t start,
-				   uint64_t length,
-				   bool use_scratch)
+				   uint64_t length)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	gen6_pte_t *pt_vaddr, scratch_pte;
@@ -1794,7 +1872,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 	unsigned last_pte, i;
 
 	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-				     I915_CACHE_LLC, true, 0);
+				     I915_CACHE_LLC, 0);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
@@ -1832,7 +1910,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
 		pt_vaddr[act_pte] =
-			vm->pte_encode(addr, cache_level, true, flags);
+			vm->pte_encode(addr, cache_level, flags);
 
 		if (++act_pte == GEN6_PTES) {
 			kunmap_px(ppgtt, pt_vaddr);
@@ -2056,11 +2134,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	int ret;
 
 	ppgtt->base.pte_encode = ggtt->base.pte_encode;
-	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
+	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
 		ppgtt->switch_mm = gen6_mm_switch;
-	else if (IS_HASWELL(dev))
+	else if (IS_HASWELL(dev_priv))
 		ppgtt->switch_mm = hsw_mm_switch;
-	else if (IS_GEN7(dev))
+	else if (IS_GEN7(dev_priv))
 		ppgtt->switch_mm = gen7_mm_switch;
 	else
 		BUG();
@@ -2111,8 +2189,10 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
 }
 
 static void i915_address_space_init(struct i915_address_space *vm,
-				    struct drm_i915_private *dev_priv)
+				    struct drm_i915_private *dev_priv,
+				    const char *name)
 {
+	i915_gem_timeline_init(dev_priv, &vm->timeline, name);
 	drm_mm_init(&vm->mm, vm->start, vm->total);
 	INIT_LIST_HEAD(&vm->active_list);
 	INIT_LIST_HEAD(&vm->inactive_list);
@@ -2129,26 +2209,27 @@ static void gtt_write_workarounds(struct drm_device *dev)
 	 * workarounds here even if they get overwritten by GPU reset.
 	 */
 	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
-	if (IS_BROADWELL(dev))
+	if (IS_BROADWELL(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
-	else if (IS_CHERRYVIEW(dev))
+	else if (IS_CHERRYVIEW(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
-	else if (IS_SKYLAKE(dev))
+	else if (IS_SKYLAKE(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
 }
 
 static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
 			   struct drm_i915_private *dev_priv,
-			   struct drm_i915_file_private *file_priv)
+			   struct drm_i915_file_private *file_priv,
+			   const char *name)
 {
 	int ret;
 
 	ret = __hw_ppgtt_init(ppgtt, dev_priv);
 	if (ret == 0) {
 		kref_init(&ppgtt->ref);
-		i915_address_space_init(&ppgtt->base, dev_priv);
+		i915_address_space_init(&ppgtt->base, dev_priv, name);
 		ppgtt->base.file = file_priv;
 	}
 
@@ -2157,6 +2238,8 @@ static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
 	gtt_write_workarounds(dev);
 
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
@@ -2168,9 +2251,9 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 	if (!USES_PPGTT(dev))
 		return 0;
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev_priv))
 		gen6_ppgtt_enable(dev);
-	else if (IS_GEN7(dev))
+	else if (IS_GEN7(dev_priv))
 		gen7_ppgtt_enable(dev);
 	else if (INTEL_INFO(dev)->gen >= 8)
 		gen8_ppgtt_enable(dev);
@@ -2182,7 +2265,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_i915_private *dev_priv,
-		  struct drm_i915_file_private *fpriv)
+		  struct drm_i915_file_private *fpriv,
+		  const char *name)
 {
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
@@ -2191,7 +2275,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
-	ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
+	ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
 	if (ret) {
 		kfree(ppgtt);
 		return ERR_PTR(ret);
@@ -2214,6 +2298,7 @@ void  i915_ppgtt_release(struct kref *kref)
 	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
 	WARN_ON(!list_empty(&ppgtt->base.unbound_list));
 
+	i915_gem_timeline_fini(&ppgtt->base.timeline);
 	list_del(&ppgtt->base.global_link);
 	drm_mm_takedown(&ppgtt->base.mm);
 
@@ -2239,11 +2324,12 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	if (INTEL_INFO(dev_priv)->gen < 6)
 		return;
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		u32 fault_reg;
 		fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (fault_reg & RING_FAULT_VALID) {
@@ -2260,7 +2346,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
 				   fault_reg & ~RING_FAULT_VALID);
 		}
 	}
-	POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
+
+	/* Engine specific init may not have been done till this point. */
+	if (dev_priv->engine[RCS])
+		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
 }
 
 static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
@@ -2286,20 +2375,20 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 
 	i915_check_and_clear_faults(dev_priv);
 
-	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
-			     true);
+	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
 
 	i915_ggtt_flush(dev_priv);
 }
 
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+			       struct sg_table *pages)
 {
-	if (!dma_map_sg(&obj->base.dev->pdev->dev,
-			obj->pages->sgl, obj->pages->nents,
-			PCI_DMA_BIDIRECTIONAL))
-		return -ENOSPC;
+	if (dma_map_sg(&obj->base.dev->pdev->dev,
+		       pages->sgl, pages->nents,
+		       PCI_DMA_BIDIRECTIONAL))
+		return 0;
 
-	return 0;
+	return -ENOSPC;
 }
 
 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
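
The prepare/finish pair is the DMA-mapping bracket every backend's
get_pages/put_pages implementation is expected to use (the new
i915_gem_internal.c later in this series does exactly this). Note the inverted
test: dma_map_sg() returns the number of mapped segments, with 0 meaning
failure, hence the early-return-on-success shape. Sketched from the caller's
side, with the error label being illustrative:

	err = i915_gem_gtt_prepare_pages(obj, pages); /* dma-map the sg list */
	if (err)
		goto err_free_pages;
	/* ... pages are now usable for GTT binding ... */
	i915_gem_gtt_finish_pages(obj, pages);        /* dma-unmap on release */
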
@@ -2317,16 +2406,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 	gen8_pte_t __iomem *pte =
 		(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(offset >> PAGE_SHIFT);
-	int rpm_atomic_seq;
 
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
-	gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+	gen8_set_pte(pte, gen8_pte_encode(addr, level));
 
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2340,15 +2424,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	gen8_pte_t __iomem *gtt_entries;
 	gen8_pte_t gtt_entry;
 	dma_addr_t addr;
-	int rpm_atomic_seq;
 	int i = 0;
 
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
 
 	for_each_sgt_dma(addr, sgt_iter, st) {
-		gtt_entry = gen8_pte_encode(addr, level, true);
+		gtt_entry = gen8_pte_encode(addr, level);
 		gen8_set_pte(&gtt_entries[i++], gtt_entry);
 	}
 
@@ -2368,8 +2449,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 struct insert_entries {
@@ -2408,16 +2487,11 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 	gen6_pte_t __iomem *pte =
 		(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(offset >> PAGE_SHIFT);
-	int rpm_atomic_seq;
 
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
-	iowrite32(vm->pte_encode(addr, level, true, flags), pte);
+	iowrite32(vm->pte_encode(addr, level, flags), pte);
 
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 /*
@@ -2437,15 +2511,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	gen6_pte_t __iomem *gtt_entries;
 	gen6_pte_t gtt_entry;
 	dma_addr_t addr;
-	int rpm_atomic_seq;
 	int i = 0;
 
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
 	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
 
 	for_each_sgt_dma(addr, sgt_iter, st) {
-		gtt_entry = vm->pte_encode(addr, level, true, flags);
+		gtt_entry = vm->pte_encode(addr, level, flags);
 		iowrite32(gtt_entry, &gtt_entries[i++]);
 	}
 
@@ -2464,23 +2535,16 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void nop_clear_range(struct i915_address_space *vm,
-			    uint64_t start,
-			    uint64_t length,
-			    bool use_scratch)
+			    uint64_t start, uint64_t length)
 {
 }
 
 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
-				  uint64_t start,
-				  uint64_t length,
-				  bool use_scratch)
+				  uint64_t start, uint64_t length)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
@@ -2488,9 +2552,6 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 	int i;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
@@ -2498,21 +2559,16 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		num_entries = max_entries;
 
 	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
-				      I915_CACHE_LLC,
-				      use_scratch);
+				      I915_CACHE_LLC);
 	for (i = 0; i < num_entries; i++)
 		gen8_set_pte(&gtt_base[i], scratch_pte);
 	readl(gtt_base);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t start,
-				  uint64_t length,
-				  bool use_scratch)
+				  uint64_t length)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
@@ -2520,9 +2576,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 	int i;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
@@ -2530,13 +2583,11 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		num_entries = max_entries;
 
 	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-				     I915_CACHE_LLC, use_scratch, 0);
+				     I915_CACHE_LLC, 0);
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void i915_ggtt_insert_page(struct i915_address_space *vm,
@@ -2545,16 +2596,10 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
 				  enum i915_cache_level cache_level,
 				  u32 unused)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2562,40 +2607,25 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 				     uint64_t start,
 				     enum i915_cache_level cache_level, u32 unused)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
 
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
-
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t start,
-				  uint64_t length,
-				  bool unused)
+				  uint64_t length)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
-	unsigned first_entry = start >> PAGE_SHIFT;
-	unsigned num_entries = length >> PAGE_SHIFT;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
-	intel_gtt_clear_range(first_entry, num_entries);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
 }
 
 static int ggtt_bind_vma(struct i915_vma *vma,
 			 enum i915_cache_level cache_level,
 			 u32 flags)
 {
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
 	struct drm_i915_gem_object *obj = vma->obj;
 	u32 pte_flags = 0;
 	int ret;
@@ -2608,8 +2638,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	if (obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;
 
+	intel_runtime_pm_get(i915);
 	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
 				cache_level, pte_flags);
+	intel_runtime_pm_put(i915);
 
 	/*
 	 * Without aliasing PPGTT there's no difference between
@@ -2625,6 +2657,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				 enum i915_cache_level cache_level,
 				 u32 flags)
 {
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
 	u32 pte_flags;
 	int ret;
 
@@ -2639,14 +2672,15 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 
 	if (flags & I915_VMA_GLOBAL_BIND) {
+		intel_runtime_pm_get(i915);
 		vma->vm->insert_entries(vma->vm,
 					vma->pages, vma->node.start,
 					cache_level, pte_flags);
+		intel_runtime_pm_put(i915);
 	}
 
 	if (flags & I915_VMA_LOCAL_BIND) {
-		struct i915_hw_ppgtt *appgtt =
-			to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
 					    vma->pages, vma->node.start,
 					    cache_level, pte_flags);
@@ -2657,21 +2691,24 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+	struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 	const u64 size = min(vma->size, vma->node.size);
 
-	if (vma->flags & I915_VMA_GLOBAL_BIND)
+	if (vma->flags & I915_VMA_GLOBAL_BIND) {
+		intel_runtime_pm_get(i915);
 		vma->vm->clear_range(vma->vm,
-				     vma->node.start, size,
-				     true);
+				     vma->node.start, size);
+		intel_runtime_pm_put(i915);
+	}
 
 	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
-					 vma->node.start, size,
-					 true);
+					 vma->node.start, size);
 }
 
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+			       struct sg_table *pages)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct device *kdev = &dev_priv->drm.pdev->dev;
@@ -2685,8 +2722,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
-		     PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
 }
 
 static void i915_gtt_color_adjust(struct drm_mm_node *node,
@@ -2717,6 +2753,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	 */
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned long hole_start, hole_end;
+	struct i915_hw_ppgtt *ppgtt;
 	struct drm_mm_node *entry;
 	int ret;
 
@@ -2724,45 +2761,48 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	if (ret)
 		return ret;
 
+	/* Reserve a mappable slot for our lockless error capture */
+	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
+						  &ggtt->error_capture,
+						  4096, 0, -1,
+						  0, ggtt->mappable_end,
+						  0, 0);
+	if (ret)
+		return ret;
+
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
 		ggtt->base.clear_range(&ggtt->base, hole_start,
-				     hole_end - hole_start, true);
+				       hole_end - hole_start);
 	}
 
 	/* And finally clear the reserved guard page */
 	ggtt->base.clear_range(&ggtt->base,
-			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
-			       true);
+			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
 
 	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
-		struct i915_hw_ppgtt *ppgtt;
-
 		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-		if (!ppgtt)
-			return -ENOMEM;
-
-		ret = __hw_ppgtt_init(ppgtt, dev_priv);
-		if (ret) {
-			kfree(ppgtt);
-			return ret;
+		if (!ppgtt) {
+			ret = -ENOMEM;
+			goto err;
 		}
 
-		if (ppgtt->base.allocate_va_range)
+		ret = __hw_ppgtt_init(ppgtt, dev_priv);
+		if (ret)
+			goto err_ppgtt;
+
+		if (ppgtt->base.allocate_va_range) {
 			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
 							    ppgtt->base.total);
-		if (ret) {
-			ppgtt->base.cleanup(&ppgtt->base);
-			kfree(ppgtt);
-			return ret;
+			if (ret)
+				goto err_ppgtt_cleanup;
 		}
 
 		ppgtt->base.clear_range(&ppgtt->base,
 					ppgtt->base.start,
-					ppgtt->base.total,
-					true);
+					ppgtt->base.total);
 
 		dev_priv->mm.aliasing_ppgtt = ppgtt;
 		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
@@ -2770,6 +2810,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	}
 
 	return 0;
+
+err_ppgtt_cleanup:
+	ppgtt->base.cleanup(&ppgtt->base);
+err_ppgtt:
+	kfree(ppgtt);
+err:
+	drm_mm_remove_node(&ggtt->error_capture);
+	return ret;
 }
 
 /**
@@ -2788,6 +2836,9 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 
 	i915_gem_cleanup_stolen(&dev_priv->drm);
 
+	if (drm_mm_node_allocated(&ggtt->error_capture))
+		drm_mm_remove_node(&ggtt->error_capture);
+
 	if (drm_mm_initialized(&ggtt->base.mm)) {
 		intel_vgt_deballoon(dev_priv);
 
@@ -2895,7 +2946,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 	 * resort to an uncached mapping. The WC issue is easily caught by the
 	 * readback check when writing GTT PTE entries.
 	 */
-	if (IS_BROXTON(ggtt->base.dev))
+	if (IS_BROXTON(to_i915(ggtt->base.dev)))
 		ggtt->gsm = ioremap_nocache(phys_addr, size);
 	else
 		ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -3190,11 +3241,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	/* Subtract the guard page before address space initialization to
 	 * shrink the range used by drm_mm.
 	 */
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	ggtt->base.total -= PAGE_SIZE;
-	i915_address_space_init(&ggtt->base, dev_priv);
+	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
 	ggtt->base.total += PAGE_SIZE;
 	if (!HAS_LLC(dev_priv))
 		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
 				dev_priv->ggtt.mappable_base,
@@ -3237,14 +3290,13 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	i915_check_and_clear_faults(dev_priv);
 
 	/* First fill our portion of the GTT with scratch pages */
-	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
-			       true);
+	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
 
 	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
 	list_for_each_entry_safe(obj, on,
-				 &dev_priv->mm.bound_list, global_list) {
+				 &dev_priv->mm.bound_list, global_link) {
 		bool ggtt_bound = false;
 		struct i915_vma *vma;
 
@@ -3267,7 +3319,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	ggtt->base.closed = false;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+		if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
 			chv_setup_private_ppat(dev_priv);
 		else
 			bdw_setup_private_ppat(dev_priv);
@@ -3303,6 +3355,7 @@ i915_vma_retire(struct i915_gem_active *active,
 	const unsigned int idx = rq->engine->id;
 	struct i915_vma *vma =
 		container_of(active, struct i915_vma, last_read[idx]);
+	struct drm_i915_gem_object *obj = vma->obj;
 
 	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
 
@@ -3313,6 +3366,34 @@ i915_vma_retire(struct i915_gem_active *active,
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
 		WARN_ON(i915_vma_unbind(vma));
+
+	GEM_BUG_ON(!i915_gem_object_is_active(obj));
+	if (--obj->active_count)
+		return;
+
+	/* Bump our place on the bound list to keep it roughly in LRU order
+	 * so that we don't steal from recently used but inactive objects
+	 * (unless we are forced to, of course!)
+	 */
+	if (obj->bind_count)
+		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+
+	obj->mm.dirty = true; /* be paranoid  */
+
+	if (i915_gem_object_has_active_reference(obj)) {
+		i915_gem_object_clear_active_reference(obj);
+		i915_gem_object_put(obj);
+	}
+}
+
+static void
+i915_ggtt_retire__write(struct i915_gem_active *active,
+			struct drm_i915_gem_request *request)
+{
+	struct i915_vma *vma =
+		container_of(active, struct i915_vma, last_write);
+
+	intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
 }
 
 void i915_vma_destroy(struct i915_vma *vma)
@@ -3334,17 +3415,40 @@ void i915_vma_close(struct i915_vma *vma)
 	GEM_BUG_ON(i915_vma_is_closed(vma));
 	vma->flags |= I915_VMA_CLOSED;
 
-	list_del_init(&vma->obj_link);
+	list_del(&vma->obj_link);
+	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+
 	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
 		WARN_ON(i915_vma_unbind(vma));
 }
 
+static inline long vma_compare(struct i915_vma *vma,
+			       struct i915_address_space *vm,
+			       const struct i915_ggtt_view *view)
+{
+	GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+	if (vma->vm != vm)
+		return vma->vm - vm;
+
+	if (!view)
+		return vma->ggtt_view.type;
+
+	if (vma->ggtt_view.type != view->type)
+		return vma->ggtt_view.type - view->type;
+
+	return memcmp(&vma->ggtt_view.params,
+		      &view->params,
+		      sizeof(view->params));
+}
+
 static struct i915_vma *
 __i915_vma_create(struct drm_i915_gem_object *obj,
 		  struct i915_address_space *vm,
 		  const struct i915_ggtt_view *view)
 {
 	struct i915_vma *vma;
+	struct rb_node *rb, **p;
 	int i;
 
 	GEM_BUG_ON(vm->closed);
@@ -3356,6 +3460,8 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&vma->exec_list);
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
 		init_request_active(&vma->last_read[i], i915_vma_retire);
+	init_request_active(&vma->last_write,
+			    i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
 	init_request_active(&vma->last_fence, NULL);
 	list_add(&vma->vm_link, &vm->unbound_list);
 	vma->vm = vm;
@@ -3376,40 +3482,36 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
 
 	if (i915_is_ggtt(vm)) {
 		vma->flags |= I915_VMA_GGTT;
+		list_add(&vma->obj_link, &obj->vma_list);
 	} else {
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+		list_add_tail(&vma->obj_link, &obj->vma_list);
 	}
 
-	list_add_tail(&vma->obj_link, &obj->vma_list);
+	rb = NULL;
+	p = &obj->vma_tree.rb_node;
+	while (*p) {
+		struct i915_vma *pos;
+
+		rb = *p;
+		pos = rb_entry(rb, struct i915_vma, obj_node);
+		if (vma_compare(pos, vm, view) < 0)
+			p = &rb->rb_right;
+		else
+			p = &rb->rb_left;
+	}
+	rb_link_node(&vma->obj_node, rb, p);
+	rb_insert_color(&vma->obj_node, &obj->vma_tree);
+
 	return vma;
 }
 
-static inline bool vma_matches(struct i915_vma *vma,
-			       struct i915_address_space *vm,
-			       const struct i915_ggtt_view *view)
-{
-	if (vma->vm != vm)
-		return false;
-
-	if (!i915_vma_is_ggtt(vma))
-		return true;
-
-	if (!view)
-		return vma->ggtt_view.type == 0;
-
-	if (vma->ggtt_view.type != view->type)
-		return false;
-
-	return memcmp(&vma->ggtt_view.params,
-		      &view->params,
-		      sizeof(view->params)) == 0;
-}
-
 struct i915_vma *
 i915_vma_create(struct drm_i915_gem_object *obj,
 		struct i915_address_space *vm,
 		const struct i915_ggtt_view *view)
 {
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 	GEM_BUG_ON(view && !i915_is_ggtt(vm));
 	GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
 
@@ -3421,12 +3523,23 @@ i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    const struct i915_ggtt_view *view)
 {
-	struct i915_vma *vma;
+	struct rb_node *rb;
 
-	list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
-		if (vma_matches(vma, vm, view))
+	rb = obj->vma_tree.rb_node;
+	while (rb) {
+		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
+		long cmp;
+
+		cmp = vma_compare(vma, vm, view);
+		if (cmp == 0)
 			return vma;
 
+		if (cmp < 0)
+			rb = rb->rb_right;
+		else
+			rb = rb->rb_left;
+	}
+
 	return NULL;
 }
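
This is the standard kernel rbtree idiom: descend with a total-order
comparator to find either the match or the empty slot, then rb_link_node() plus
rb_insert_color() for insertion. The one subtlety is that insertion in
__i915_vma_create() and the lookup here must branch the same way on
vma_compare()'s sign — both send cmp < 0 to the right — or lookups would miss
previously inserted nodes. The generic insertion shape both functions follow:

	while (*p) {
		parent = *p;
		pos = rb_entry(parent, struct i915_vma, obj_node);
		if (vma_compare(pos, vm, view) < 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&vma->obj_node, parent, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
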
 
@@ -3437,11 +3550,14 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 	GEM_BUG_ON(view && !i915_is_ggtt(vm));
 
 	vma = i915_gem_obj_to_vma(obj, vm, view);
-	if (!vma)
+	if (!vma) {
 		vma = __i915_vma_create(obj, vm, view);
+		GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
+	}
 
 	GEM_BUG_ON(i915_vma_is_closed(vma));
 	return vma;
@@ -3507,7 +3623,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
 
 	/* Populate source page list from the object. */
 	i = 0;
-	for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
 		page_addr_list[i++] = dma_addr;
 
 	GEM_BUG_ON(i != n_pages);
@@ -3543,35 +3659,47 @@ intel_partial_pages(const struct i915_ggtt_view *view,
 		    struct drm_i915_gem_object *obj)
 {
 	struct sg_table *st;
-	struct scatterlist *sg;
-	struct sg_page_iter obj_sg_iter;
+	struct scatterlist *sg, *iter;
+	unsigned int count = view->params.partial.size;
+	unsigned int offset;
 	int ret = -ENOMEM;
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st)
 		goto err_st_alloc;
 
-	ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
+	ret = sg_alloc_table(st, count, GFP_KERNEL);
 	if (ret)
 		goto err_sg_alloc;
 
+	iter = i915_gem_object_get_sg(obj,
+				      view->params.partial.offset,
+				      &offset);
+	GEM_BUG_ON(!iter);
+
 	sg = st->sgl;
 	st->nents = 0;
-	for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
-		view->params.partial.offset)
-	{
-		if (st->nents >= view->params.partial.size)
-			break;
+	do {
+		unsigned int len;
 
-		sg_set_page(sg, NULL, PAGE_SIZE, 0);
-		sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
-		sg_dma_len(sg) = PAGE_SIZE;
+		len = min(iter->length - (offset << PAGE_SHIFT),
+			  count << PAGE_SHIFT);
+		sg_set_page(sg, NULL, len, 0);
+		sg_dma_address(sg) =
+			sg_dma_address(iter) + (offset << PAGE_SHIFT);
+		sg_dma_len(sg) = len;
 
-		sg = sg_next(sg);
 		st->nents++;
-	}
+		count -= len >> PAGE_SHIFT;
+		if (count == 0) {
+			sg_mark_end(sg);
+			return st;
+		}
 
-	return st;
+		sg = __sg_next(sg);
+		iter = __sg_next(iter);
+		offset = 0;
+	} while (1);
 
 err_sg_alloc:
 	kfree(st);
@@ -3584,11 +3712,18 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 {
 	int ret = 0;
 
+	/* The vma->pages are only valid within the lifespan of the borrowed
+	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+	 * must be the vma->pages. A simple rule is that vma->pages must only
+	 * be accessed when the obj->mm.pages are pinned.
+	 */
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
 	if (vma->pages)
 		return 0;
 
 	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-		vma->pages = vma->obj->pages;
+		vma->pages = vma->obj->mm.pages;
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
 		vma->pages =
 			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
@@ -3695,11 +3830,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 void i915_vma_unpin_and_release(struct i915_vma **p_vma)
 {
 	struct i915_vma *vma;
+	struct drm_i915_gem_object *obj;
 
 	vma = fetch_and_zero(p_vma);
 	if (!vma)
 		return;
 
+	obj = vma->obj;
+
 	i915_vma_unpin(vma);
-	i915_vma_put(vma);
+	i915_vma_close(vma);
+
+	__i915_gem_object_release_unless_active(obj);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index ec78be2..c23ef9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -211,6 +211,7 @@ struct i915_vma {
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	struct i915_gem_active last_write;
 	struct i915_gem_active last_fence;
 
 	/**
@@ -226,6 +227,7 @@ struct i915_vma {
 	struct list_head vm_link;
 
 	struct list_head obj_link; /* Link in the object's VMA list */
+	struct rb_node obj_node;
 
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
@@ -341,6 +343,7 @@ struct i915_pml4 {
 
 struct i915_address_space {
 	struct drm_mm mm;
+	struct i915_gem_timeline timeline;
 	struct drm_device *dev;
 	/* Every address space belongs to a struct file - except for the global
 	 * GTT that is owned by the driver (and so @file is set to NULL). In
@@ -395,7 +398,7 @@ struct i915_address_space {
 	/* FIXME: Need a more generic return type */
 	gen6_pte_t (*pte_encode)(dma_addr_t addr,
 				 enum i915_cache_level level,
-				 bool valid, u32 flags); /* Create a valid PTE */
+				 u32 flags); /* Create a valid PTE */
 	/* flags for pte_encode */
 #define PTE_READ_ONLY	(1<<0)
 	int (*allocate_va_range)(struct i915_address_space *vm,
@@ -403,8 +406,7 @@ struct i915_address_space {
 				 uint64_t length);
 	void (*clear_range)(struct i915_address_space *vm,
 			    uint64_t start,
-			    uint64_t length,
-			    bool use_scratch);
+			    uint64_t length);
 	void (*insert_page)(struct i915_address_space *vm,
 			    dma_addr_t addr,
 			    uint64_t offset,
@@ -450,6 +452,8 @@ struct i915_ggtt {
 	bool do_idle_maps;
 
 	int mtrr;
+
+	struct drm_mm_node error_capture;
 };
 
 struct i915_hw_ppgtt {
@@ -611,7 +615,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 int i915_ppgtt_init_hw(struct drm_device *dev);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
-					struct drm_i915_file_private *fpriv);
+					struct drm_i915_file_private *fpriv,
+					const char *name);
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
 	if (ppgtt)
@@ -627,8 +632,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+					    struct sg_table *pages);
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+			       struct sg_table *pages);
 
 /* Flags used by pin/bind&friends. */
 #define PIN_NONBLOCK		BIT(0)
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
new file mode 100644
index 0000000..4b3ff3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+
+/* convert swiotlb segment size into sensible units (pages)! */
+#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
+
+static void internal_free_pages(struct sg_table *st)
+{
+	struct scatterlist *sg;
+
+	for (sg = st->sgl; sg; sg = __sg_next(sg))
+		__free_pages(sg_page(sg), get_order(sg->length));
+
+	sg_free_table(st);
+	kfree(st);
+}
+
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	unsigned int npages = obj->base.size / PAGE_SIZE;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	int max_order;
+	gfp_t gfp;
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = st->sgl;
+	st->nents = 0;
+
+	max_order = MAX_ORDER;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
+		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+#endif
+
+	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
+	if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		gfp &= ~__GFP_HIGHMEM;
+		gfp |= __GFP_DMA32;
+	}
+
+	do {
+		int order = min(fls(npages) - 1, max_order);
+		struct page *page;
+
+		do {
+			page = alloc_pages(gfp | (order ? QUIET : 0), order);
+			if (page)
+				break;
+			if (!order--)
+				goto err;
+
+			/* Limit subsequent allocations as well */
+			max_order = order;
+		} while (1);
+
+		sg_set_page(sg, page, PAGE_SIZE << order, 0);
+		st->nents++;
+
+		npages -= 1 << order;
+		if (!npages) {
+			sg_mark_end(sg);
+			break;
+		}
+
+		sg = __sg_next(sg);
+	} while (1);
+
+	if (i915_gem_gtt_prepare_pages(obj, st))
+		goto err;
+
+	/* Mark the pages as dontneed whilst they are still pinned. As soon
+	 * as they are unpinned they are allowed to be reaped by the shrinker,
+	 * and the caller is expected to repopulate - the contents of this
+	 * object are only valid whilst active and pinned.
+	 */
+	obj->mm.madv = I915_MADV_DONTNEED;
+	return st;
+
+err:
+	sg_mark_end(sg);
+	internal_free_pages(st);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+					       struct sg_table *pages)
+{
+	i915_gem_gtt_finish_pages(obj, pages);
+	internal_free_pages(pages);
+
+	obj->mm.dirty = false;
+	obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+		 I915_GEM_OBJECT_IS_SHRINKABLE,
+	.get_pages = i915_gem_object_get_pages_internal,
+	.put_pages = i915_gem_object_put_pages_internal,
+};
+
+/**
+ * Creates a new object that wraps some internal memory for private use.
+ * This object is not backed by swappable storage, and as such its contents
+ * are volatile and only valid whilst pinned. If the object is reaped by the
+ * shrinker, its pages and data will be discarded. Equally, it is not a full
+ * GEM object and so not valid for access from userspace. This makes it useful
+ * for hardware interfaces like ringbuffers (which are pinned from the time
+ * the request is written to the time the hardware stops accessing it), but
+ * not for contexts (which need to be preserved when not active for later
+ * reuse). Note that it is not cleared upon allocation.
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+				unsigned int size)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_object_alloc(&i915->drm);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(&i915->drm, &obj->base, size);
+	i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+	return obj;
+}
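A minimal usage sketch for the new internal-object API (variable names illustrative; full error unwinding elided). Because the backing pages are volatile, the object is only useful while pinned:

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_create(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return PTR_ERR(vma);
	}

	/* Once unpinned, the shrinker may discard the contents entirely. */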
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 95b7e9a..5af19b0 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,17 +28,19 @@
 #include "i915_drv.h"
 #include "intel_renderstate.h"
 
-struct render_state {
+struct intel_render_state {
 	const struct intel_renderstate_rodata *rodata;
 	struct i915_vma *vma;
-	u32 aux_batch_size;
-	u32 aux_batch_offset;
+	u32 batch_offset;
+	u32 batch_size;
+	u32 aux_offset;
+	u32 aux_size;
 };
 
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(const struct drm_i915_gem_request *req)
+render_state_get_rodata(const struct intel_engine_cs *engine)
 {
-	switch (INTEL_GEN(req->i915)) {
+	switch (INTEL_GEN(engine->i915)) {
 	case 6:
 		return &gen6_null_state;
 	case 7:
@@ -63,29 +65,26 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
  */
 #define OUT_BATCH(batch, i, val)				\
 	do {							\
-		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {	\
-			ret = -ENOSPC;				\
-			goto err_out;				\
-		}						\
+		if ((i) >= PAGE_SIZE / sizeof(u32))		\
+			goto err;				\
 		(batch)[(i)++] = (val);				\
 	} while(0)
 
-static int render_state_setup(struct render_state *so)
+static int render_state_setup(struct intel_render_state *so,
+			      struct drm_i915_private *i915)
 {
-	struct drm_device *dev = so->vma->vm->dev;
 	const struct intel_renderstate_rodata *rodata = so->rodata;
-	const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
+	struct drm_i915_gem_object *obj = so->vma->obj;
 	unsigned int i = 0, reloc_index = 0;
-	struct page *page;
+	unsigned int needs_clflush;
 	u32 *d;
 	int ret;
 
-	ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
+	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
-	d = kmap(page);
+	d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
 
 	while (i < rodata->batch_items) {
 		u32 s = rodata->batch[i];
@@ -93,12 +92,10 @@ static int render_state_setup(struct render_state *so)
 		if (i * 4  == rodata->reloc[reloc_index]) {
 			u64 r = s + so->vma->node.start;
 			s = lower_32_bits(r);
-			if (has_64bit_reloc) {
+			if (HAS_64BIT_RELOC(i915)) {
 				if (i + 1 >= rodata->batch_items ||
-				    rodata->batch[i + 1] != 0) {
-					ret = -EINVAL;
-					goto err_out;
-				}
+				    rodata->batch[i + 1] != 0)
+					goto err;
 
 				d[i++] = s;
 				s = upper_32_bits(r);
@@ -110,12 +107,20 @@ static int render_state_setup(struct render_state *so)
 		d[i++] = s;
 	}
 
+	if (rodata->reloc[reloc_index] != -1) {
+		DRM_ERROR("only %d relocs resolved\n", reloc_index);
+		goto err;
+	}
+
+	so->batch_offset = so->vma->node.start;
+	so->batch_size = rodata->batch_items * sizeof(u32);
+
 	while (i % CACHELINE_DWORDS)
 		OUT_BATCH(d, i, MI_NOOP);
 
-	so->aux_batch_offset = i * sizeof(u32);
+	so->aux_offset = i * sizeof(u32);
 
-	if (HAS_POOLED_EU(dev)) {
+	if (HAS_POOLED_EU(i915)) {
 		/*
 		 * We always program 3x6 pool config but depending upon which
 		 * subslice is disabled HW drops down to appropriate config
@@ -143,88 +148,133 @@ static int render_state_setup(struct render_state *so)
 	}
 
 	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
-	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
-
+	so->aux_size = i * sizeof(u32) - so->aux_offset;
+	so->aux_offset += so->batch_offset;
 	/*
 	 * Since we are sending length, we need to strictly conform to
 	 * all requirements. For Gen2 this must be a multiple of 8.
 	 */
-	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
+	so->aux_size = ALIGN(so->aux_size, 8);
 
-	kunmap(page);
+	if (needs_clflush)
+		drm_clflush_virt_range(d, i * sizeof(u32));
+	kunmap_atomic(d);
 
-	ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
-	if (ret)
-		return ret;
-
-	if (rodata->reloc[reloc_index] != -1) {
-		DRM_ERROR("only %d relocs resolved\n", reloc_index);
-		return -EINVAL;
-	}
-
-	return 0;
-
-err_out:
-	kunmap(page);
+	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+out:
+	i915_gem_obj_finish_shmem_access(obj);
 	return ret;
+
+err:
+	kunmap_atomic(d);
+	ret = -EINVAL;
+	goto out;
 }
 
 #undef OUT_BATCH
 
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
+int i915_gem_render_state_init(struct intel_engine_cs *engine)
 {
-	struct render_state so;
+	struct intel_render_state *so;
+	const struct intel_renderstate_rodata *rodata;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (WARN_ON(req->engine->id != RCS))
-		return -ENOENT;
-
-	so.rodata = render_state_get_rodata(req);
-	if (!so.rodata)
+	if (engine->id != RCS)
 		return 0;
 
-	if (so.rodata->batch_items * 4 > 4096)
+	rodata = render_state_get_rodata(engine);
+	if (!rodata)
+		return 0;
+
+	if (rodata->batch_items * 4 > 4096)
 		return -EINVAL;
 
-	obj = i915_gem_object_create(&req->i915->drm, 4096);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	so = kmalloc(sizeof(*so), GFP_KERNEL);
+	if (!so)
+		return -ENOMEM;
 
-	so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
-	if (IS_ERR(so.vma)) {
-		ret = PTR_ERR(so.vma);
+	obj = i915_gem_object_create_internal(engine->i915, 4096);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto err_free;
+	}
+
+	so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+	if (IS_ERR(so->vma)) {
+		ret = PTR_ERR(so->vma);
 		goto err_obj;
 	}
 
-	ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
-	if (ret)
-		goto err_obj;
+	so->rodata = rodata;
+	engine->render_state = so;
+	return 0;
 
-	ret = render_state_setup(&so);
-	if (ret)
-		goto err_unpin;
+err_obj:
+	i915_gem_object_put(obj);
+err_free:
+	kfree(so);
+	return ret;
+}
 
-	ret = req->engine->emit_bb_start(req, so.vma->node.start,
-					 so.rodata->batch_items * 4,
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+{
+	struct intel_render_state *so;
+	int ret;
+
+	lockdep_assert_held(&req->i915->drm.struct_mutex);
+
+	so = req->engine->render_state;
+	if (!so)
+		return 0;
+
+	/* Recreate the page after shrinking */
+	if (!so->vma->obj->mm.pages)
+		so->batch_offset = -1;
+
+	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		return ret;
+
+	if (so->vma->node.start != so->batch_offset) {
+		ret = render_state_setup(so, req->i915);
+		if (ret)
+			goto err_unpin;
+	}
+
+	ret = req->engine->emit_bb_start(req,
+					 so->batch_offset, so->batch_size,
 					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto err_unpin;
 
-	if (so.aux_batch_size > 8) {
+	if (so->aux_size > 8) {
 		ret = req->engine->emit_bb_start(req,
-						 (so.vma->node.start +
-						  so.aux_batch_offset),
-						 so.aux_batch_size,
+						 so->aux_offset, so->aux_size,
 						 I915_DISPATCH_SECURE);
 		if (ret)
 			goto err_unpin;
 	}
 
-	i915_vma_move_to_active(so.vma, req, 0);
+	i915_vma_move_to_active(so->vma, req, 0);
 err_unpin:
-	i915_vma_unpin(so.vma);
-err_obj:
-	i915_gem_object_put(obj);
+	i915_vma_unpin(so->vma);
 	return ret;
 }
+
+void i915_gem_render_state_fini(struct intel_engine_cs *engine)
+{
+	struct intel_render_state *so;
+	struct drm_i915_gem_object *obj;
+
+	so = fetch_and_zero(&engine->render_state);
+	if (!so)
+		return;
+
+	obj = so->vma->obj;
+
+	i915_vma_close(so->vma);
+	__i915_gem_object_release_unless_active(obj);
+
+	kfree(so);
+}
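The refactor replaces the old one-shot, request-time initialisation with a per-engine lifecycle. A sketch of the assumed call sites (the actual callers sit in the engine setup/teardown paths, outside this hunk):

	/* once, at engine initialisation: allocate the volatile batch */
	err = i915_gem_render_state_init(engine);

	/* per request that needs it: pin, lazily (re)write the batch if the
	 * shrinker reaped or relocated it, then emit both batch buffers */
	err = i915_gem_render_state_emit(req);

	/* once, at engine teardown */
	i915_gem_render_state_fini(engine);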
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 18cce3f..8748184 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,6 +26,8 @@
 
 struct drm_i915_gem_request;
 
-int i915_gem_render_state_init(struct drm_i915_gem_request *req);
+int i915_gem_render_state_init(struct intel_engine_cs *engine);
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
+void i915_gem_render_state_fini(struct intel_engine_cs *engine);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8832f8e..5050464 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -23,31 +23,26 @@
  */
 
 #include <linux/prefetch.h>
+#include <linux/dma-fence-array.h>
 
 #include "i915_drv.h"
 
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "i915";
 }
 
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
-	/* Timelines are bound by eviction to a VM. However, since
-	 * we only have a global seqno at the moment, we only have
-	 * a single timeline. Note that each timeline will have
-	 * multiple execution contexts (fence contexts) as we allow
-	 * engines within a single timeline to execute in parallel.
-	 */
-	return "global";
+	return to_request(fence)->timeline->common->name;
 }
 
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
 {
 	return i915_gem_request_completed(to_request(fence));
 }
 
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
 	if (i915_fence_signaled(fence))
 		return false;
@@ -56,63 +51,27 @@ static bool i915_fence_enable_signaling(struct fence *fence)
 	return true;
 }
 
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
 				   bool interruptible,
-				   signed long timeout_jiffies)
+				   signed long timeout)
 {
-	s64 timeout_ns, *timeout;
-	int ret;
-
-	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
-		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
-		timeout = &timeout_ns;
-	} else {
-		timeout = NULL;
-	}
-
-	ret = i915_wait_request(to_request(fence),
-				interruptible, timeout,
-				NO_WAITBOOST);
-	if (ret == -ETIME)
-		return 0;
-
-	if (ret < 0)
-		return ret;
-
-	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
-		timeout_jiffies = nsecs_to_jiffies(timeout_ns);
-
-	return timeout_jiffies;
+	return i915_wait_request(to_request(fence), interruptible, timeout);
 }
 
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
-{
-	snprintf(str, size, "%u", fence->seqno);
-}
-
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
-					  int size)
-{
-	snprintf(str, size, "%u",
-		 intel_engine_get_seqno(to_request(fence)->engine));
-}
-
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
 {
 	struct drm_i915_gem_request *req = to_request(fence);
 
 	kmem_cache_free(req->i915->requests, req);
 }
 
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
 	.get_driver_name = i915_fence_get_driver_name,
 	.get_timeline_name = i915_fence_get_timeline_name,
 	.enable_signaling = i915_fence_enable_signaling,
 	.signaled = i915_fence_signaled,
 	.wait = i915_fence_wait,
 	.release = i915_fence_release,
-	.fence_value_str = i915_fence_value_str,
-	.timeline_value_str = i915_fence_timeline_value_str,
 };
 
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -164,8 +123,14 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
 	struct i915_gem_active *active, *next;
 
+	lockdep_assert_held(&request->i915->drm.struct_mutex);
+	GEM_BUG_ON(!i915_gem_request_completed(request));
+
 	trace_i915_gem_request_retire(request);
-	list_del(&request->link);
+
+	spin_lock_irq(&request->engine->timeline->lock);
+	list_del_init(&request->link);
+	spin_unlock_irq(&request->engine->timeline->lock);
 
 	/* We know the GPU must have read the request to have
 	 * sent us the seqno + interrupt, so use the position
@@ -177,6 +142,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	 */
 	list_del(&request->ring_link);
 	request->ring->last_retired_head = request->postfix;
+	request->i915->gt.active_requests--;
 
 	/* Walk through the active list, calling retire on each. This allows
 	 * objects to track their GPU activity and mark themselves as idle
@@ -214,6 +180,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	}
 
 	i915_gem_context_put(request->ctx);
+
+	dma_fence_signal(&request->fence);
 	i915_gem_request_put(request);
 }
 
@@ -223,10 +191,11 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 	struct drm_i915_gem_request *tmp;
 
 	lockdep_assert_held(&req->i915->drm.struct_mutex);
-	GEM_BUG_ON(list_empty(&req->link));
+	if (list_empty(&req->link))
+		return;
 
 	do {
-		tmp = list_first_entry(&engine->request_list,
+		tmp = list_first_entry(&engine->timeline->requests,
 				       typeof(*tmp), link);
 
 		i915_gem_request_retire(tmp);
@@ -253,39 +222,50 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
+	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int ret;
 
 	/* Carefully retire all requests without writing to the rings */
-	for_each_engine(engine, dev_priv) {
-		ret = intel_engine_idle(engine,
-					I915_WAIT_INTERRUPTIBLE |
-					I915_WAIT_LOCKED);
-		if (ret)
-			return ret;
-	}
-	i915_gem_retire_requests(dev_priv);
+	ret = i915_gem_wait_for_idle(i915,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(i915);
+	GEM_BUG_ON(i915->gt.active_requests > 1);
 
 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
-		while (intel_kick_waiters(dev_priv) ||
-		       intel_kick_signalers(dev_priv))
-			yield();
+	if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+		while (intel_breadcrumbs_busy(i915))
+			cond_resched(); /* spin until threads are complete */
 	}
+	atomic_set(&timeline->next_seqno, seqno);
 
 	/* Finally reset hw state */
-	for_each_engine(engine, dev_priv)
-		intel_engine_init_seqno(engine, seqno);
+	for_each_engine(engine, i915, id)
+		intel_engine_init_global_seqno(engine, seqno);
+
+	list_for_each_entry(timeline, &i915->gt.timelines, link) {
+		for_each_engine(engine, i915, id) {
+			struct intel_timeline *tl = &timeline->engine[id];
+
+			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+		}
+	}
 
 	return 0;
 }
 
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	if (seqno == 0)
 		return -EINVAL;
@@ -293,29 +273,37 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 	/* HWS page needs to be set less than what we
 	 * will inject to ring
 	 */
-	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
-	if (ret)
-		return ret;
+	return i915_gem_init_global_seqno(dev_priv, seqno - 1);
+}
 
-	dev_priv->next_seqno = seqno;
+static int reserve_global_seqno(struct drm_i915_private *i915)
+{
+	u32 active_requests = ++i915->gt.active_requests;
+	u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+	int ret;
+
+	/* Reservation is fine until we need to wrap around */
+	if (likely(next_seqno + active_requests > next_seqno))
+		return 0;
+
+	ret = i915_gem_init_global_seqno(i915, 0);
+	if (ret) {
+		i915->gt.active_requests--;
+		return ret;
+	}
+
 	return 0;
 }
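The reservation check exploits unsigned wrap-around: adding the number of outstanding requests to next_seqno overflows u32 exactly when the global seqno would wrap before all of them are assigned. A worked example with illustrative values:

	u32 next_seqno = 0xfffffffe;	/* two steps from wrapping */
	u32 active_requests = 4;

	/* 0xfffffffe + 4 wraps to 0x2, so the sum is not greater than
	 * next_seqno, the check fails, and we reset the timeline via
	 * i915_gem_init_global_seqno(i915, 0) before proceeding. */
	bool fits = next_seqno + active_requests > next_seqno; /* false */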
 
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
 {
-	/* reserve 0 for non-seqno */
-	if (unlikely(dev_priv->next_seqno == 0)) {
-		int ret;
+	/* next_seqno only incremented under a mutex */
+	return ++tl->next_seqno.counter;
+}
 
-		ret = i915_gem_init_seqno(dev_priv, 0);
-		if (ret)
-			return ret;
-
-		dev_priv->next_seqno = 1;
-	}
-
-	*seqno = dev_priv->next_seqno++;
-	return 0;
+static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
+{
+	return atomic_inc_return(&tl->next_seqno);
 }
 
 static int __i915_sw_fence_call
@@ -323,18 +311,46 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
 	struct drm_i915_gem_request *request =
 		container_of(fence, typeof(*request), submit);
+	struct intel_engine_cs *engine = request->engine;
+	struct intel_timeline *timeline;
+	unsigned long flags;
+	u32 seqno;
+
+	if (state != FENCE_COMPLETE)
+		return NOTIFY_DONE;
+
+	/* Transfer from per-context onto the global per-engine timeline */
+	timeline = engine->timeline;
+	GEM_BUG_ON(timeline == request->timeline);
 
 	/* Will be called from irq-context when using foreign DMA fences */
+	spin_lock_irqsave(&timeline->lock, flags);
 
-	switch (state) {
-	case FENCE_COMPLETE:
-		request->engine->last_submitted_seqno = request->fence.seqno;
-		request->engine->submit_request(request);
-		break;
+	seqno = timeline_get_seqno(timeline->common);
+	GEM_BUG_ON(!seqno);
+	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
 
-	case FENCE_FREE:
-		break;
-	}
+	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
+	request->previous_seqno = timeline->last_submitted_seqno;
+	timeline->last_submitted_seqno = seqno;
+
+	/* We may be recursing from the signal callback of another i915 fence */
+	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+	request->global_seqno = seqno;
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+		intel_engine_enable_signaling(request);
+	spin_unlock(&request->lock);
+
+	GEM_BUG_ON(!request->global_seqno);
+	engine->emit_breadcrumb(request,
+				request->ring->vaddr + request->postfix);
+	engine->submit_request(request);
+
+	spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+	list_move_tail(&request->link, &timeline->requests);
+	spin_unlock(&request->timeline->lock);
+
+	spin_unlock_irqrestore(&timeline->lock, flags);
 
 	return NOTIFY_DONE;
 }
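The nested locking in submit_notify() is easy to misread; summarising the order as taken above:

	/*
	 * engine->timeline->lock      outer, irqsave (may be entered from irq
	 *                             context for foreign DMA fences)
	 *   request->lock             SINGLE_DEPTH_NESTING, as we may recurse
	 *                             from another i915 fence's signal callback
	 *   request->timeline->lock   SINGLE_DEPTH_NESTING, to move the request
	 *                             from the per-context list onto the engine
	 */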
@@ -357,9 +373,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_request *req;
-	u32 seqno;
 	int ret;
 
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
 	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
 	 * and restart.
@@ -368,10 +385,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (ret)
 		return ERR_PTR(ret);
 
+	ret = reserve_global_seqno(dev_priv);
+	if (ret)
+		return ERR_PTR(ret);
+
 	/* Move the oldest request to the slab-cache (if not in use!) */
-	req = list_first_entry_or_null(&engine->request_list,
+	req = list_first_entry_or_null(&engine->timeline->requests,
 				       typeof(*req), link);
-	if (req && i915_gem_request_completed(req))
+	if (req && __i915_gem_request_completed(req))
 		i915_gem_request_retire(req);
 
 	/* Beware: Dragons be flying overhead.
@@ -382,13 +403,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * of being read by __i915_gem_active_get_rcu(). As such,
 	 * we have to be very careful when overwriting the contents. During
 	 * the RCU lookup, we chase the request->engine pointer,
-	 * read the request->fence.seqno and increment the reference count.
+	 * read the request->global_seqno and increment the reference count.
 	 *
 	 * The reference count is incremented atomically. If it is zero,
 	 * the lookup knows the request is unallocated and complete. Otherwise,
 	 * it is either still in use, or has been reallocated and reset
-	 * with fence_init(). This increment is safe for release as we check
-	 * that the request we have a reference to and matches the active
+	 * with dma_fence_init(). This increment is safe for release as we
+	 * check that the request we have a reference to matches the active
 	 * request.
 	 *
 	 * Before we increment the refcount, we chase the request->engine
@@ -403,19 +424,20 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * Do not use kmem_cache_zalloc() here!
 	 */
 	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
-	if (!req)
-		return ERR_PTR(-ENOMEM);
+	if (!req) {
+		ret = -ENOMEM;
+		goto err_unreserve;
+	}
 
-	ret = i915_gem_get_seqno(dev_priv, &seqno);
-	if (ret)
-		goto err;
+	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+	GEM_BUG_ON(req->timeline == engine->timeline);
 
 	spin_lock_init(&req->lock);
-	fence_init(&req->fence,
-		   &i915_fence_ops,
-		   &req->lock,
-		   engine->fence_context,
-		   seqno);
+	dma_fence_init(&req->fence,
+		       &i915_fence_ops,
+		       &req->lock,
+		       req->timeline->fence_context,
+		       __timeline_get_seqno(req->timeline->common));
 
 	i915_sw_fence_init(&req->submit, submit_notify);
 
@@ -425,6 +447,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->ctx = i915_gem_context_get(ctx);
 
 	/* No zalloc, must clear what we need by hand */
+	req->global_seqno = 0;
 	req->previous_context = NULL;
 	req->file_priv = NULL;
 	req->batch = NULL;
@@ -437,6 +460,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * away, e.g. because a GPU scheduler has deferred it.
 	 */
 	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
 
 	if (i915.enable_execlists)
 		ret = intel_logical_ring_alloc_request_extras(req);
@@ -456,8 +480,9 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 err_ctx:
 	i915_gem_context_put(ctx);
-err:
 	kmem_cache_free(dev_priv->requests, req);
+err_unreserve:
+	dev_priv->gt.active_requests--;
 	return ERR_PTR(ret);
 }
 
@@ -465,15 +490,28 @@ static int
 i915_gem_request_await_request(struct drm_i915_gem_request *to,
 			       struct drm_i915_gem_request *from)
 {
-	int idx, ret;
+	int ret;
 
 	GEM_BUG_ON(to == from);
 
-	if (to->engine == from->engine)
+	if (to->timeline == from->timeline)
 		return 0;
 
-	idx = intel_engine_sync_index(from->engine, to->engine);
-	if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+	if (to->engine == from->engine) {
+		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+						       &from->submit,
+						       GFP_KERNEL);
+		return ret < 0 ? ret : 0;
+	}
+
+	if (!from->global_seqno) {
+		ret = i915_sw_fence_await_dma_fence(&to->submit,
+						    &from->fence, 0,
+						    GFP_KERNEL);
+		return ret < 0 ? ret : 0;
+	}
+
+	if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
 		return 0;
 
 	trace_i915_gem_ring_sync_to(to, from);
@@ -491,7 +529,54 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 			return ret;
 	}
 
-	from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+	to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
+	return 0;
+}
+
+int
+i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+				 struct dma_fence *fence)
+{
+	struct dma_fence_array *array;
+	int ret;
+	int i;
+
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return 0;
+
+	if (dma_fence_is_i915(fence))
+		return i915_gem_request_await_request(req, to_request(fence));
+
+	if (!dma_fence_is_array(fence)) {
+		ret = i915_sw_fence_await_dma_fence(&req->submit,
+						    fence, I915_FENCE_TIMEOUT,
+						    GFP_KERNEL);
+		return ret < 0 ? ret : 0;
+	}
+
+	/* Note that if the fence-array was created in signal-on-any mode,
+	 * we should *not* decompose it into its individual fences. However,
+	 * we don't currently store which mode the fence-array is operating
+	 * in. Fortunately, the only user of signal-on-any is private to
+	 * amdgpu and we should not see any incoming fence-array from
+	 * sync-file being in signal-on-any mode.
+	 */
+
+	array = to_dma_fence_array(fence);
+	for (i = 0; i < array->num_fences; i++) {
+		struct dma_fence *child = array->fences[i];
+
+		if (dma_fence_is_i915(child))
+			ret = i915_gem_request_await_request(req,
+							     to_request(child));
+		else
+			ret = i915_sw_fence_await_dma_fence(&req->submit,
+							    child, I915_FENCE_TIMEOUT,
+							    GFP_KERNEL);
+		if (ret < 0)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -520,40 +605,47 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
 			      struct drm_i915_gem_object *obj,
 			      bool write)
 {
-	struct i915_gem_active *active;
-	unsigned long active_mask;
-	int idx;
+	struct dma_fence *excl;
+	int ret = 0;
 
 	if (write) {
-		active_mask = i915_gem_object_get_active(obj);
-		active = obj->last_read;
-	} else {
-		active_mask = 1;
-		active = &obj->last_write;
-	}
+		struct dma_fence **shared;
+		unsigned int count, i;
 
-	for_each_active(active_mask, idx) {
-		struct drm_i915_gem_request *request;
-		int ret;
-
-		request = i915_gem_active_peek(&active[idx],
-					       &obj->base.dev->struct_mutex);
-		if (!request)
-			continue;
-
-		ret = i915_gem_request_await_request(to, request);
+		ret = reservation_object_get_fences_rcu(obj->resv,
+							&excl, &count, &shared);
 		if (ret)
 			return ret;
+
+		for (i = 0; i < count; i++) {
+			ret = i915_gem_request_await_dma_fence(to, shared[i]);
+			if (ret)
+				break;
+
+			dma_fence_put(shared[i]);
+		}
+
+		for (; i < count; i++)
+			dma_fence_put(shared[i]);
+		kfree(shared);
+	} else {
+		excl = reservation_object_get_excl_rcu(obj->resv);
 	}
 
-	return 0;
+	if (excl) {
+		if (ret == 0)
+			ret = i915_gem_request_await_dma_fence(to, excl);
+
+		dma_fence_put(excl);
+	}
+
+	return ret;
 }
 
 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
-	dev_priv->gt.active_engines |= intel_engine_flag(engine);
 	if (dev_priv->gt.awake)
 		return;
 
@@ -579,11 +671,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_ring *ring = request->ring;
+	struct intel_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
-	u32 request_start;
-	u32 reserved_tail;
-	int ret;
+	int err;
 
+	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	trace_i915_gem_request_add(request);
 
 	/*
@@ -591,8 +683,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
-	request_start = ring->tail;
-	reserved_tail = request->reserved_space;
 	request->reserved_space = 0;
 
 	/*
@@ -603,10 +693,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * what.
 	 */
 	if (flush_caches) {
-		ret = engine->emit_flush(request, EMIT_FLUSH);
+		err = engine->emit_flush(request, EMIT_FLUSH);
 
 		/* Not allowed to fail! */
-		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+		WARN(err, "engine->emit_flush() failed: %d!\n", err);
 	}
 
 	/* Record the position of the start of the breadcrumb so that
@@ -614,20 +704,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * GPU processing the request, we never over-estimate the
 	 * position of the ring's HEAD.
 	 */
+	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+	GEM_BUG_ON(err);
 	request->postfix = ring->tail;
-
-	/* Not allowed to fail! */
-	ret = engine->emit_request(request);
-	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
-
-	/* Sanity check that the reserved size was large enough. */
-	ret = ring->tail - request_start;
-	if (ret < 0)
-		ret += ring->size;
-	WARN_ONCE(ret > reserved_tail,
-		  "Not enough space reserved (%d bytes) "
-		  "for adding the request (%d bytes)\n",
-		  reserved_tail, ret);
+	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
 
 	/* Seal the request and mark it as pending execution. Note that
 	 * we may inspect this state, without holding any locks, during
@@ -635,18 +715,24 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * see a more recent value in the hws than we are tracking.
 	 */
 
-	prev = i915_gem_active_raw(&engine->last_request,
+	prev = i915_gem_active_raw(&timeline->last_request,
 				   &request->i915->drm.struct_mutex);
 	if (prev)
 		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
 					     &request->submitq);
 
-	request->emitted_jiffies = jiffies;
-	request->previous_seqno = engine->last_pending_seqno;
-	engine->last_pending_seqno = request->fence.seqno;
-	i915_gem_active_set(&engine->last_request, request);
-	list_add_tail(&request->link, &engine->request_list);
+	spin_lock_irq(&timeline->lock);
+	list_add_tail(&request->link, &timeline->requests);
+	spin_unlock_irq(&timeline->lock);
+
+	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
+				     request->fence.seqno));
+
+	timeline->last_submitted_seqno = request->fence.seqno;
+	i915_gem_active_set(&timeline->last_request, request);
+
 	list_add_tail(&request->ring_link, &ring->request_list);
+	request->emitted_jiffies = jiffies;
 
 	i915_gem_mark_busy(engine);
 
@@ -714,7 +800,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 
 	timeout_us += local_clock_us(&cpu);
 	do {
-		if (i915_gem_request_completed(req))
+		if (__i915_gem_request_completed(req))
 			return true;
 
 		if (signal_pending_state(state, current))
@@ -729,76 +815,101 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 	return false;
 }
 
+static long
+__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
+			       unsigned int flags,
+			       long timeout)
+{
+	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
+	DEFINE_WAIT(reset);
+	DEFINE_WAIT(wait);
+
+	if (flags & I915_WAIT_LOCKED)
+		add_wait_queue(q, &reset);
+
+	do {
+		prepare_to_wait(&request->submit.wait, &wait, state);
+
+		if (i915_sw_fence_done(&request->submit))
+			break;
+
+		if (flags & I915_WAIT_LOCKED &&
+		    i915_reset_in_progress(&request->i915->gpu_error)) {
+			__set_current_state(TASK_RUNNING);
+			i915_reset(request->i915);
+			reset_wait_queue(q, &reset);
+			continue;
+		}
+
+		if (signal_pending_state(state, current)) {
+			timeout = -ERESTARTSYS;
+			break;
+		}
+
+		timeout = io_schedule_timeout(timeout);
+	} while (timeout);
+	finish_wait(&request->submit.wait, &wait);
+
+	if (flags & I915_WAIT_LOCKED)
+		remove_wait_queue(q, &reset);
+
+	return timeout;
+}
+
 /**
  * i915_wait_request - wait until execution of request has finished
- * @req: duh!
+ * @req: the request to wait upon
  * @flags: how to wait
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: client to charge for RPS boosting
+ * @timeout: how long to wait in jiffies
  *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
+ * i915_wait_request() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
  *
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
+ * If the caller holds the struct_mutex, it must pass I915_WAIT_LOCKED in via
+ * the flags; conversely, if the struct_mutex is not held, the caller must not
+ * specify that the wait is locked.
+ *
+ * Returns the remaining time (in jiffies) if the request completed, which may
+ * be zero, or -ETIME if the request is unfinished after the timeout expires.
+ * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
+ * pending before the request completes.
  */
-int i915_wait_request(struct drm_i915_gem_request *req,
-		      unsigned int flags,
-		      s64 *timeout,
-		      struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+		       unsigned int flags,
+		       long timeout)
 {
 	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(reset);
 	struct intel_wait wait;
-	unsigned long timeout_remain;
-	int ret = 0;
 
 	might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
-	GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+	GEM_BUG_ON(debug_locks &&
+		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
 		   !!(flags & I915_WAIT_LOCKED));
 #endif
+	GEM_BUG_ON(timeout < 0);
 
 	if (i915_gem_request_completed(req))
-		return 0;
+		return timeout;
 
-	timeout_remain = MAX_SCHEDULE_TIMEOUT;
-	if (timeout) {
-		if (WARN_ON(*timeout < 0))
-			return -EINVAL;
-
-		if (*timeout == 0)
-			return -ETIME;
-
-		/* Record current time in case interrupted, or wedged */
-		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
-		*timeout += ktime_get_raw_ns();
-	}
+	if (!timeout)
+		return -ETIME;
 
 	trace_i915_gem_request_wait_begin(req);
 
-	/* This client is about to stall waiting for the GPU. In many cases
-	 * this is undesirable and limits the throughput of the system, as
-	 * many clients cannot continue processing user input/output whilst
-	 * blocked. RPS autotuning may take tens of milliseconds to respond
-	 * to the GPU load and thus incurs additional latency for the client.
-	 * We can circumvent that by promoting the GPU frequency to maximum
-	 * before we wait. This makes the GPU throttle up much more quickly
-	 * (good for benchmarks and user experience, e.g. window animations),
-	 * but at a cost of spending more power processing the workload
-	 * (bad for battery). Not all clients even want their results
-	 * immediately and for them we should just let the GPU select its own
-	 * frequency to maximise efficiency. To prevent a single client from
-	 * forcing the clocks too high for the whole system, we only allow
-	 * each client to waitboost once in a busy period.
-	 */
-	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
-		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+	if (!i915_sw_fence_done(&req->submit)) {
+		timeout = __i915_request_wait_for_submit(req, flags, timeout);
+		if (timeout < 0)
+			goto complete;
+
+		GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+	}
+	GEM_BUG_ON(!req->global_seqno);
 
 	/* Optimistic short spin before touching IRQs */
 	if (i915_spin_request(req, state, 5))
@@ -808,7 +919,7 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 	if (flags & I915_WAIT_LOCKED)
 		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-	intel_wait_init(&wait, req->fence.seqno);
+	intel_wait_init(&wait, req->global_seqno);
 	if (intel_engine_add_wait(req->engine, &wait))
 		/* In order to check that we haven't missed the interrupt
 		 * as we enabled it, we need to kick ourselves to do a
@@ -818,16 +929,17 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 
 	for (;;) {
 		if (signal_pending_state(state, current)) {
-			ret = -ERESTARTSYS;
+			timeout = -ERESTARTSYS;
 			break;
 		}
 
-		timeout_remain = io_schedule_timeout(timeout_remain);
-		if (timeout_remain == 0) {
-			ret = -ETIME;
+		if (!timeout) {
+			timeout = -ETIME;
 			break;
 		}
 
+		timeout = io_schedule_timeout(timeout);
+
 		if (intel_wait_complete(&wait))
 			break;
 
@@ -874,74 +986,39 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 complete:
 	trace_i915_gem_request_wait_end(req);
 
-	if (timeout) {
-		*timeout -= ktime_get_raw_ns();
-		if (*timeout < 0)
-			*timeout = 0;
-
-		/*
-		 * Apparently ktime isn't accurate enough and occasionally has a
-		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
-		 * things up to make the test happy. We allow up to 1 jiffy.
-		 *
-		 * This is a regrssion from the timespec->ktime conversion.
-		 */
-		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
-			*timeout = 0;
-	}
-
-	if (IS_RPS_USER(rps) &&
-	    req->fence.seqno == req->engine->last_submitted_seqno) {
-		/* The GPU is now idle and this client has stalled.
-		 * Since no other client has submitted a request in the
-		 * meantime, assume that this client is the only one
-		 * supplying work to the GPU but is unable to keep that
-		 * work supplied because it is waiting. Since the GPU is
-		 * then never kept fully busy, RPS autoclocking will
-		 * keep the clocks relatively low, causing further delays.
-		 * Compensate by giving the synchronous client credit for
-		 * a waitboost next time.
-		 */
-		spin_lock(&req->i915->rps.client_lock);
-		list_del_init(&rps->link);
-		spin_unlock(&req->i915->rps.client_lock);
-	}
-
-	return ret;
+	return timeout;
 }
 
-static bool engine_retire_requests(struct intel_engine_cs *engine)
+static void engine_retire_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request, *next;
 
-	list_for_each_entry_safe(request, next, &engine->request_list, link) {
-		if (!i915_gem_request_completed(request))
-			return false;
+	list_for_each_entry_safe(request, next,
+				 &engine->timeline->requests, link) {
+		if (!__i915_gem_request_completed(request))
+			return;
 
 		i915_gem_request_retire(request);
 	}
-
-	return true;
 }
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
-	unsigned int tmp;
+	enum intel_engine_id id;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (dev_priv->gt.active_engines == 0)
+	if (!dev_priv->gt.active_requests)
 		return;
 
 	GEM_BUG_ON(!dev_priv->gt.awake);
 
-	for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
-		if (engine_retire_requests(engine))
-			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
+	for_each_engine(engine, dev_priv, id)
+		engine_retire_requests(engine);
 
-	if (dev_priv->gt.active_engines == 0)
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->gt.idle_work,
-				   msecs_to_jiffies(100));
+	if (!dev_priv->gt.active_requests)
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->gt.idle_work,
+				 msecs_to_jiffies(100));
 }
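With the signature change, callers that keep a nanosecond budget (e.g. the wait ioctl) now do the jiffies conversion themselves. A sketch of the assumed caller shape, using the helpers the removed code relied on:

	long timeout = nsecs_to_jiffies_timeout(args->timeout_ns);

	timeout = i915_wait_request(req, I915_WAIT_INTERRUPTIBLE, timeout);
	if (timeout < 0)
		return timeout;	/* -ETIME, or -ERESTARTSYS on a signal */

	/* positive (or zero) remainder: convert back for userspace */
	args->timeout_ns = jiffies_to_nsecs(timeout);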
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 974bd7b..0f69fad 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -25,7 +25,7 @@
 #ifndef I915_GEM_REQUEST_H
 #define I915_GEM_REQUEST_H
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "i915_gem.h"
 #include "i915_sw_fence.h"
@@ -62,7 +62,7 @@ struct intel_signal_node {
  * The requests are reference counted.
  */
 struct drm_i915_gem_request {
-	struct fence fence;
+	struct dma_fence fence;
 	spinlock_t lock;
 
 	/** On Which ring this request was generated */
@@ -81,11 +81,14 @@ struct drm_i915_gem_request {
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
 	struct intel_ring *ring;
+	struct intel_timeline *timeline;
 	struct intel_signal_node signaling;
 
 	struct i915_sw_fence submit;
 	wait_queue_t submitq;
 
+	u32 global_seqno;
+
 	/** GEM sequence number associated with the previous request,
 	 * when the HWS breadcrumb is equal to this the GPU is processing
 	 * this request.
@@ -145,9 +148,9 @@ struct drm_i915_gem_request {
 	struct list_head execlist_link;
 };
 
-extern const struct fence_ops i915_fence_ops;
+extern const struct dma_fence_ops i915_fence_ops;
 
-static inline bool fence_is_i915(struct fence *fence)
+static inline bool dma_fence_is_i915(const struct dma_fence *fence)
 {
 	return fence->ops == &i915_fence_ops;
 }
@@ -159,43 +162,31 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 				   struct drm_file *file);
 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
 
-static inline u32
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
-	return req ? req->fence.seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
-	return req ? req->engine : NULL;
-}
-
 static inline struct drm_i915_gem_request *
-to_request(struct fence *fence)
+to_request(struct dma_fence *fence)
 {
 	/* We assume that NULL fence/request are interoperable */
 	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
-	GEM_BUG_ON(fence && !fence_is_i915(fence));
+	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
 	return container_of(fence, struct drm_i915_gem_request, fence);
 }
 
 static inline struct drm_i915_gem_request *
 i915_gem_request_get(struct drm_i915_gem_request *req)
 {
-	return to_request(fence_get(&req->fence));
+	return to_request(dma_fence_get(&req->fence));
 }
 
 static inline struct drm_i915_gem_request *
 i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
 {
-	return to_request(fence_get_rcu(&req->fence));
+	return to_request(dma_fence_get_rcu(&req->fence));
 }
 
 static inline void
 i915_gem_request_put(struct drm_i915_gem_request *req)
 {
-	fence_put(&req->fence);
+	dma_fence_put(&req->fence);
 }
 
 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -214,6 +205,8 @@ int
 i915_gem_request_await_object(struct drm_i915_gem_request *to,
 			      struct drm_i915_gem_object *obj,
 			      bool write);
+int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+				     struct dma_fence *fence);
 
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \
@@ -226,13 +219,13 @@ struct intel_rps_client;
 #define IS_RPS_CLIENT(p) (!IS_ERR(p))
 #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
 
-int i915_wait_request(struct drm_i915_gem_request *req,
-		      unsigned int flags,
-		      s64 *timeout,
-		      struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+		       unsigned int flags,
+		       long timeout)
 	__attribute__((nonnull(1)));
 #define I915_WAIT_INTERRUPTIBLE	BIT(0)
 #define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
+#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
@@ -245,17 +238,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 }
 
 static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+__i915_gem_request_started(const struct drm_i915_gem_request *req)
 {
+	GEM_BUG_ON(!req->global_seqno);
 	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
 				 req->previous_seqno);
 }
 
 static inline bool
+i915_gem_request_started(const struct drm_i915_gem_request *req)
+{
+	if (!req->global_seqno)
+		return false;
+
+	return __i915_gem_request_started(req);
+}
+
+static inline bool
+__i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+	GEM_BUG_ON(!req->global_seqno);
+	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+				 req->global_seqno);
+}
+
+static inline bool
 i915_gem_request_completed(const struct drm_i915_gem_request *req)
 {
-	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				 req->fence.seqno);
+	if (!req->global_seqno)
+		return false;
+
+	return __i915_gem_request_completed(req);
 }
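A zero global_seqno distinguishes requests that have not yet reached submit_notify(); summarising the assumed lifecycle:

	/*
	 * req->global_seqno == 0   constructed, possibly waiting on other
	 *                          fences; cannot have started or completed
	 * req->global_seqno != 0   assigned under engine->timeline->lock at
	 *                          submission; only then may the __-prefixed
	 *                          helpers compare it against the HWS seqno
	 */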
 
 bool __i915_spin_request(const struct drm_i915_gem_request *request,
@@ -263,7 +276,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
 static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
 				     int state, unsigned long timeout_us)
 {
-	return (i915_gem_request_started(request) &&
+	return (__i915_gem_request_started(request) &&
 		__i915_spin_request(request, state, timeout_us));
 }
 
@@ -497,7 +510,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
 		 * compiler.
 		 *
 		 * The atomic operation at the heart of
-		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+		 * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
 		 * atomic_inc_not_zero() which is only a full memory barrier
 		 * when successful. That is, if i915_gem_request_get_rcu()
 		 * returns the request (and so with the reference counted
@@ -552,53 +565,13 @@ i915_gem_active_isset(const struct i915_gem_active *active)
 }
 
 /**
- * i915_gem_active_is_idle - report whether the active tracker is idle
- * @active - the active tracker
- *
- * i915_gem_active_is_idle() returns true if the active tracker is currently
- * unassigned or if the request is complete (but not yet retired). Requires
- * the caller to hold struct_mutex (but that can be relaxed if desired).
- */
-static inline bool
-i915_gem_active_is_idle(const struct i915_gem_active *active,
-			struct mutex *mutex)
-{
-	return !i915_gem_active_peek(active, mutex);
-}
-
-/**
  * i915_gem_active_wait - waits until the request is completed
  * @active - the active request on which to wait
- *
- * i915_gem_active_wait() waits until the request is completed before
- * returning. Note that it does not guarantee that the request is
- * retired first, see i915_gem_active_retire().
- *
- * i915_gem_active_wait() returns immediately if the active
- * request is already complete.
- */
-static inline int __must_check
-i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
-{
-	struct drm_i915_gem_request *request;
-
-	request = i915_gem_active_peek(active, mutex);
-	if (!request)
-		return 0;
-
-	return i915_wait_request(request,
-				 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
-				 NULL, NULL);
-}
-
-/**
- * i915_gem_active_wait_unlocked - waits until the request is completed
- * @active - the active request on which to wait
  * @flags - how to wait
  * @timeout - how long to wait at most
  * @rps - userspace client to charge for a waitboost
  *
- * i915_gem_active_wait_unlocked() waits until the request is completed before
+ * i915_gem_active_wait() waits until the request is completed before
  * returning, without requiring any locks to be held. Note that it does not
  * retire any requests before returning.
  *
@@ -614,21 +587,18 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
  * Returns 0 if successful, or a negative error code.
  */
 static inline int
-i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
-			      unsigned int flags,
-			      s64 *timeout,
-			      struct intel_rps_client *rps)
+i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
 {
 	struct drm_i915_gem_request *request;
-	int ret = 0;
+	long ret = 0;
 
 	request = i915_gem_active_get_unlocked(active);
 	if (request) {
-		ret = i915_wait_request(request, flags, timeout, rps);
+		ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
 		i915_gem_request_put(request);
 	}
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /**
@@ -645,7 +615,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
 		       struct mutex *mutex)
 {
 	struct drm_i915_gem_request *request;
-	int ret;
+	long ret;
 
 	request = i915_gem_active_raw(active, mutex);
 	if (!request)
@@ -653,8 +623,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
 
 	ret = i915_wait_request(request,
 				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
-				NULL, NULL);
-	if (ret)
+				MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
 		return ret;
 
 	list_del_init(&active->link);
@@ -665,24 +635,6 @@ i915_gem_active_retire(struct i915_gem_active *active,
 	return 0;
 }
 
-/* Convenience functions for peeking at state inside active's request whilst
- * guarded by the struct_mutex.
- */
-
-static inline uint32_t
-i915_gem_active_get_seqno(const struct i915_gem_active *active,
-			  struct mutex *mutex)
-{
-	return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
-}
-
-static inline struct intel_engine_cs *
-i915_gem_active_get_engine(const struct i915_gem_active *active,
-			   struct mutex *mutex)
-{
-	return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
-}
-
 #define for_each_active(mask, idx) \
 	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
 
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1c237d0..a6fc1bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,6 +48,20 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		if (!mutex_is_locked_by(&dev->struct_mutex, current))
+			return false;
+
+		*unlock = false;
+	} else {
+		*unlock = true;
+	}
+
+	return true;
+}
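The helper's contract, as exercised by i915_gem_shrink() below: on success the struct_mutex is held, either freshly acquired or already held by the current task (in which case *unlock is false and the caller must not drop it):

	bool unlock;

	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
		return 0;	/* contended by another task: reclaim nothing */

	/* ... shrink ... */

	if (unlock)
		mutex_unlock(&dev_priv->drm.struct_mutex);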
+
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
@@ -66,8 +80,11 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-	/* Only shmemfs objects are backed by swap */
-	if (!obj->base.filp)
+	if (!obj->mm.pages)
+		return false;
+
+	/* Consider only shrinkable objects. */
+	if (!i915_gem_object_is_shrinkable(obj))
 		return false;
 
 	/* Only report true if by unbinding the object and putting its pages
@@ -78,7 +95,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	 * to the GPU, simply unbinding from the GPU is not going to succeed
 	 * in releasing our pin count on the pages themselves.
 	 */
-	if (obj->pages_pin_count > obj->bind_count)
+	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
 		return false;
 
 	if (any_vma_pinned(obj))
@@ -88,7 +105,14 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	 * discard the contents (because the user has marked them as being
 	 * purgeable) or if we can move their contents out to swap.
 	 */
-	return swap_available() || obj->madv == I915_MADV_DONTNEED;
+	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
+}
+
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+{
+	if (i915_gem_object_unbind(obj) == 0)
+		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+	return !READ_ONCE(obj->mm.pages);
 }
 
 /**
@@ -128,6 +152,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		{ NULL, 0 },
 	}, *phase;
 	unsigned long count = 0;
+	bool unlock;
+
+	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+		return 0;
 
 	trace_i915_gem_shrink(dev_priv, target, flags);
 	i915_gem_retire_requests(dev_priv);
@@ -171,40 +199,51 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		while (count < target &&
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
-						       global_list))) {
-			list_move_tail(&obj->global_list, &still_in_list);
+						       global_link))) {
+			list_move_tail(&obj->global_link, &still_in_list);
+			if (!obj->mm.pages) {
+				list_del_init(&obj->global_link);
+				continue;
+			}
 
 			if (flags & I915_SHRINK_PURGEABLE &&
-			    obj->madv != I915_MADV_DONTNEED)
+			    obj->mm.madv != I915_MADV_DONTNEED)
 				continue;
 
 			if (flags & I915_SHRINK_VMAPS &&
-			    !is_vmalloc_addr(obj->mapping))
+			    !is_vmalloc_addr(obj->mm.mapping))
 				continue;
 
-			if ((flags & I915_SHRINK_ACTIVE) == 0 &&
-			    i915_gem_object_is_active(obj))
+			if (!(flags & I915_SHRINK_ACTIVE) &&
+			    (i915_gem_object_is_active(obj) ||
+			     obj->framebuffer_references))
 				continue;
 
 			if (!can_release_pages(obj))
 				continue;
 
-			i915_gem_object_get(obj);
-
-			/* For the unbound phase, this should be a no-op! */
-			i915_gem_object_unbind(obj);
-			if (i915_gem_object_put_pages(obj) == 0)
-				count += obj->base.size >> PAGE_SHIFT;
-
-			i915_gem_object_put(obj);
+			if (unsafe_drop_pages(obj)) {
+				/* May arrive from get_pages on another bo */
+				mutex_lock_nested(&obj->mm.lock,
+						  I915_MM_SHRINKER);
+				if (!obj->mm.pages) {
+					__i915_gem_object_invalidate(obj);
+					list_del_init(&obj->global_link);
+					count += obj->base.size >> PAGE_SHIFT;
+				}
+				mutex_unlock(&obj->mm.lock);
+			}
 		}
-		list_splice(&still_in_list, phase->list);
+		list_splice_tail(&still_in_list, phase->list);
 	}
 
 	if (flags & I915_SHRINK_BOUND)
 		intel_runtime_pm_put(dev_priv);
 
 	i915_gem_retire_requests(dev_priv);
+	if (unlock)
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+
 	/* expedite the RCU grace period to free some request slabs */
 	synchronize_rcu_expedited();
 
@@ -238,19 +277,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 	return freed;
 }
 
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return false;
-
-		*unlock = false;
-	} else
-		*unlock = true;
-
-	return true;
-}
-
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
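
Note that the i915_gem_shrinker_lock() helper removed above is not gone: i915_gem_shrink() itself now calls it (see the earlier hunk), so the definition has to move above its new caller in the file. Below is a minimal userspace sketch of the pattern it implements, "trylock, else detect that we already hold the lock"; the pthread names and owner bookkeeping are illustrative stand-ins, and, like the kernel helper, the unlocked read of the owner is only best-effort:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t big_lock_owner; /* valid only while big_lock is held */

    /* Take big_lock unless this thread already holds it (shrinker re-entry). */
    static bool shrinker_lock(bool *unlock)
    {
        if (pthread_mutex_trylock(&big_lock) == 0) {
            big_lock_owner = pthread_self();
            *unlock = true;  /* we took the lock, so we must release it */
            return true;
        }
        if (!pthread_equal(big_lock_owner, pthread_self()))
            return false;    /* held by another thread: bail out */
        *unlock = false;     /* recursive entry: the outer caller unlocks */
        return true;
    }

    int main(void)
    {
        bool unlock = false;

        if (shrinker_lock(&unlock))
            printf("locked, unlock=%d\n", unlock);
        if (unlock)
            pthread_mutex_unlock(&big_lock);
        return 0;
    }
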
@@ -267,11 +293,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	i915_gem_retire_requests(dev_priv);
 
 	count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
 		if (can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
 		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
@@ -372,13 +398,19 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 * being pointed to by hardware.
 	 */
 	unbound = bound = unevictable = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+		if (!obj->mm.pages)
+			continue;
+
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			unbound += obj->base.size >> PAGE_SHIFT;
 	}
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+		if (!obj->mm.pages)
+			continue;
+
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
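
With the move to obj->mm, can_release_pages() states the reclaim invariant directly: shrinking only helps if unbinding would drop the last pin on the backing pages, and only if the contents can then be discarded or swapped out. A self-contained userspace model of that accounting (the struct layout and the swap_available() stub are illustrative, not driver code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct object {
        atomic_int pages_pin_count; /* every user of the backing store */
        int bind_count;             /* pins held purely by GPU bindings */
        bool purgeable;             /* I915_MADV_DONTNEED analogue */
    };

    static bool swap_available(void) { return true; } /* stub */

    static bool can_release_pages(struct object *obj)
    {
        /* Pins beyond the per-binding ones cannot be dropped by unbinding. */
        if (atomic_load(&obj->pages_pin_count) > obj->bind_count)
            return false;
        /* Contents must be discardable, or swap must exist to migrate to. */
        return obj->purgeable || swap_available();
    }

    int main(void)
    {
        struct object obj = { .bind_count = 1, .purgeable = false };

        atomic_store(&obj.pages_pin_count, 2); /* e.g. an extra CPU-map pin */
        printf("%d\n", can_release_pages(&obj)); /* 0: extra pin blocks reclaim */
        atomic_store(&obj.pages_pin_count, 1);
        printf("%d\n", can_release_pages(&obj)); /* 1: unbinding frees the pages */
        return 0;
    }
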
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 59989e8..b1d367d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -109,13 +109,13 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	 *
 	 */
 	base = 0;
-	if (INTEL_INFO(dev)->gen >= 3) {
+	if (INTEL_GEN(dev_priv) >= 3) {
 		u32 bsm;
 
 		pci_read_config_dword(pdev, INTEL_BSM, &bsm);
 
 		base = bsm & INTEL_BSM_MASK;
-	} else if (IS_I865G(dev)) {
+	} else if (IS_I865G(dev_priv)) {
 		u32 tseg_size = 0;
 		u16 toud = 0;
 		u8 tmp;
@@ -138,7 +138,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 					 I865_TOUD, &toud);
 
 		base = (toud << 16) + tseg_size;
-	} else if (IS_I85X(dev)) {
+	} else if (IS_I85X(dev_priv)) {
 		u32 tseg_size = 0;
 		u32 tom;
 		u8 tmp;
@@ -154,7 +154,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		tom = tmp * MB(32);
 
 		base = tom - tseg_size - ggtt->stolen_size;
-	} else if (IS_845G(dev)) {
+	} else if (IS_845G(dev_priv)) {
 		u32 tseg_size = 0;
 		u32 tom;
 		u8 tmp;
@@ -178,7 +178,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		tom = tmp * MB(32);
 
 		base = tom - tseg_size - ggtt->stolen_size;
-	} else if (IS_I830(dev)) {
+	} else if (IS_I830(dev_priv)) {
 		u32 tseg_size = 0;
 		u32 tom;
 		u8 tmp;
@@ -204,7 +204,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		return 0;
 
 	/* make sure we don't clobber the GTT if it's within stolen memory */
-	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+	if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
+	    !IS_G4X(dev_priv)) {
 		struct {
 			u32 start, end;
 		} stolen[2] = {
@@ -214,7 +215,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u64 ggtt_start, ggtt_end;
 
 		ggtt_start = I915_READ(PGTBL_CTL);
-		if (IS_GEN4(dev))
+		if (IS_GEN4(dev_priv))
 			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
 				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
 		else
@@ -270,7 +271,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		 * GEN3 firmware likes to smash pci bridges into the stolen
 		 * range. Apparently this works.
 		 */
-		if (r == NULL && !IS_GEN3(dev)) {
+		if (r == NULL && !IS_GEN3(dev_priv)) {
 			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
 				  base, base + (uint32_t)ggtt->stolen_size);
 			base = 0;
@@ -437,7 +438,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	case 3:
 		break;
 	case 4:
-		if (IS_G4X(dev))
+		if (IS_G4X(dev_priv))
 			g4x_get_stolen_reserved(dev_priv, &reserved_base,
 						&reserved_size);
 		break;
@@ -456,7 +457,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
 		break;
 	default:
 		if (IS_BROADWELL(dev_priv) ||
-		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
+		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 			bdw_get_stolen_reserved(dev_priv, &reserved_base,
 						&reserved_size);
 		else
@@ -545,25 +546,29 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	return st;
 }
 
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
-	BUG();
-	return -EINVAL;
+	return i915_pages_create_for_stolen(obj->base.dev,
+					    obj->stolen->start,
+					    obj->stolen->size);
 }
 
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
+					     struct sg_table *pages)
 {
 	/* Should only be called during free */
-	sg_free_table(obj->pages);
-	kfree(obj->pages);
+	sg_free_table(pages);
+	kfree(pages);
 }
 
-
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
+	__i915_gem_object_unpin_pages(obj);
+
 	if (obj->stolen) {
 		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
 		kfree(obj->stolen);
@@ -589,20 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	drm_gem_private_object_init(dev, &obj->base, stolen->size);
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
-	obj->pages = i915_pages_create_for_stolen(dev,
-						  stolen->start, stolen->size);
-	if (obj->pages == NULL)
-		goto cleanup;
-
-	obj->get_page.sg = obj->pages->sgl;
-	obj->get_page.last = 0;
-
-	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;
-
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
 
+	if (i915_gem_object_pin_pages(obj))
+		goto cleanup;
+
 	return obj;
 
 cleanup:
@@ -697,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err;
+
 	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
-		goto err;
+		goto err_pages;
 	}
 
 	/* To simplify the initialisation sequence between KMS and GTT,
@@ -714,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-		goto err;
+		goto err_pages;
 	}
 
-	vma->pages = obj->pages;
+	vma->pages = obj->mm.pages;
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
 
-	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	i915_gem_object_pin_pages(obj);
-
 	return obj;
 
+err_pages:
+	i915_gem_object_unpin_pages(obj);
 err:
 	i915_gem_object_put(obj);
 	return NULL;
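
The preallocated-stolen path now pins the pages before creating the VMA, which is why the failure path grows the err_pages label: each goto undoes exactly the steps that already succeeded, in reverse order. A runnable sketch of that unwind ladder (all four helpers are hypothetical stand-ins):

    #include <stdio.h>

    static int pin_pages(void)    { puts("pin");   return 0; }
    static void unpin_pages(void) { puts("unpin"); }
    static int create_vma(void)   { puts("vma");   return -1; /* force failure */ }
    static void put_object(void)  { puts("put"); }

    static int setup(void)
    {
        int ret;

        ret = pin_pages();
        if (ret)
            goto err;
        ret = create_vma();
        if (ret)
            goto err_pages; /* undo only what already succeeded */
        return 0;

    err_pages:
        unpin_pages();
    err:
        put_object();
        return ret;
    }

    int main(void) { return setup() ? 1 : 0; }
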
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a14b1e3..251d51b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -62,6 +62,7 @@
 static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int tile_width;
 
 	/* Linear is always fine */
@@ -71,8 +72,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	if (tiling_mode > I915_TILING_LAST)
 		return false;
 
-	if (IS_GEN2(dev) ||
-	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+	if (IS_GEN2(dev_priv) ||
+	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
 		tile_width = 128;
 	else
 		tile_width = 512;
@@ -90,7 +91,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		if (stride > 8192)
 			return false;
 
-		if (IS_GEN3(dev)) {
+		if (IS_GEN3(dev_priv)) {
 			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
 				return false;
 		} else {
@@ -200,12 +201,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 
 	if (!i915_tiling_ok(dev,
 			    args->stride, obj->base.size, args->tiling_mode)) {
-		i915_gem_object_put_unlocked(obj);
+		i915_gem_object_put(obj);
 		return -EINVAL;
 	}
 
-	intel_runtime_pm_get(dev_priv);
-
 	mutex_lock(&dev->struct_mutex);
 	if (obj->pin_display || obj->framebuffer_references) {
 		err = -EBUSY;
@@ -260,14 +259,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		if (!err) {
 			struct i915_vma *vma;
 
-			if (obj->pages &&
-			    obj->madv == I915_MADV_WILLNEED &&
+			mutex_lock(&obj->mm.lock);
+			if (obj->mm.pages &&
+			    obj->mm.madv == I915_MADV_WILLNEED &&
 			    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-				if (args->tiling_mode == I915_TILING_NONE)
-					i915_gem_object_unpin_pages(obj);
-				if (!i915_gem_object_is_tiled(obj))
-					i915_gem_object_pin_pages(obj);
+				if (args->tiling_mode == I915_TILING_NONE) {
+					GEM_BUG_ON(!obj->mm.quirked);
+					__i915_gem_object_unpin_pages(obj);
+					obj->mm.quirked = false;
+				}
+				if (!i915_gem_object_is_tiled(obj)) {
+					GEM_BUG_ON(!obj->mm.quirked);
+					__i915_gem_object_pin_pages(obj);
+					obj->mm.quirked = true;
+				}
 			}
+			mutex_unlock(&obj->mm.lock);
 
 			list_for_each_entry(vma, &obj->vma_list, obj_link) {
 				if (!vma->fence)
@@ -301,8 +308,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_runtime_pm_put(dev_priv);
-
 	return err;
 }
 
@@ -326,12 +331,19 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 	struct drm_i915_gem_get_tiling *args = data;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
+	int err = -ENOENT;
 
-	obj = i915_gem_object_lookup(file, args->handle);
-	if (!obj)
-		return -ENOENT;
+	rcu_read_lock();
+	obj = i915_gem_object_lookup_rcu(file, args->handle);
+	if (obj) {
+		args->tiling_mode =
+			READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
+		err = 0;
+	}
+	rcu_read_unlock();
+	if (unlikely(err))
+		return err;
 
-	args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
 	switch (args->tiling_mode) {
 	case I915_TILING_X:
 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@@ -339,11 +351,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 	case I915_TILING_Y:
 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
 		break;
+	default:
 	case I915_TILING_NONE:
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 		break;
-	default:
-		DRM_ERROR("unknown tiling mode\n");
 	}
 
 	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
@@ -356,6 +367,5 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
 
-	i915_gem_object_put_unlocked(obj);
 	return 0;
 }
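
get_tiling no longer needs a full object reference or struct_mutex: it looks the handle up under rcu_read_lock() and reads the packed tiling_and_stride word once with READ_ONCE(), so the answer is one consistent snapshot. A userspace model of reading a packed field with a single relaxed load; the mask values here are assumptions for illustration only:

    #include <stdatomic.h>
    #include <stdio.h>

    #define TILING_MASK 0x3u           /* assumed: tiling mode in the low bits */
    #define STRIDE_MASK (~TILING_MASK) /* assumed: stride in the remaining bits */

    static _Atomic unsigned int tiling_and_stride;

    int main(void)
    {
        atomic_store(&tiling_and_stride, (4096u & STRIDE_MASK) | 1u);

        /* One load == one consistent snapshot of both fields, no lock needed. */
        unsigned int snap = atomic_load_explicit(&tiling_and_stride,
                                                 memory_order_relaxed);
        printf("tiling=%u stride=%u\n",
               snap & TILING_MASK, snap & STRIDE_MASK);
        return 0;
    }
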
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
new file mode 100644
index 0000000..fc8f13a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+			   struct i915_gem_timeline *timeline,
+			   const char *name)
+{
+	unsigned int i;
+	u64 fences;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	timeline->i915 = i915;
+	timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
+	if (!timeline->name)
+		return -ENOMEM;
+
+	list_add(&timeline->link, &i915->gt.timelines);
+
+	/* Called during early_init before we know how many engines there are */
+	fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
+	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+		struct intel_timeline *tl = &timeline->engine[i];
+
+		tl->fence_context = fences++;
+		tl->common = timeline;
+
+		spin_lock_init(&tl->lock);
+		init_request_active(&tl->last_request, NULL);
+		INIT_LIST_HEAD(&tl->requests);
+	}
+
+	return 0;
+}
+
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+{
+	lockdep_assert_held(&tl->i915->drm.struct_mutex);
+
+	list_del(&tl->link);
+	kfree(tl->name);
+}
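
dma_fence_context_alloc() reserves a contiguous block of fence-context ids from a global counter, and the init loop above then hands one id to each per-engine intel_timeline. A minimal userspace model of that allocator (NUM_ENGINES and the counter seed are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Models dma_fence_context_alloc(): reserve a contiguous block of ids. */
    static atomic_uint_fast64_t fence_context_counter = 1;

    static uint64_t fence_context_alloc(unsigned int num)
    {
        return atomic_fetch_add(&fence_context_counter, num);
    }

    #define NUM_ENGINES 5

    int main(void)
    {
        uint64_t fences = fence_context_alloc(NUM_ENGINES);

        for (int i = 0; i < NUM_ENGINES; i++)
            printf("engine %d -> fence context %llu\n",
                   i, (unsigned long long)fences++);
        return 0;
    }
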
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
new file mode 100644
index 0000000..f2bf7b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_TIMELINE_H
+#define I915_GEM_TIMELINE_H
+
+#include <linux/list.h>
+
+#include "i915_gem_request.h"
+
+struct i915_gem_timeline;
+
+struct intel_timeline {
+	u64 fence_context;
+	u32 last_submitted_seqno;
+
+	spinlock_t lock;
+
+	/**
+	 * List of breadcrumbs associated with GPU requests currently
+	 * outstanding.
+	 */
+	struct list_head requests;
+
+	/* Contains an RCU-guarded pointer to the last request. No reference is
+	 * held to the request; users must carefully acquire a reference to
+	 * the request using i915_gem_active_get_request_rcu(), or hold the
+	 * struct_mutex.
+	 */
+	struct i915_gem_active last_request;
+	u32 sync_seqno[I915_NUM_ENGINES];
+
+	struct i915_gem_timeline *common;
+};
+
+struct i915_gem_timeline {
+	struct list_head link;
+	atomic_t next_seqno;
+
+	struct drm_i915_private *i915;
+	const char *name;
+
+	struct intel_timeline engine[I915_NUM_ENGINES];
+};
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+			   struct i915_gem_timeline *tl,
+			   const char *name);
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index c6f780f..6426163 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -61,33 +61,26 @@ struct i915_mmu_object {
 	bool attached;
 };
 
-static void wait_rendering(struct drm_i915_gem_object *obj)
-{
-	unsigned long active = __I915_BO_ACTIVE(obj);
-	int idx;
-
-	for_each_active(active, idx)
-		i915_gem_active_wait_unlocked(&obj->last_read[idx],
-					      0, NULL, NULL);
-}
-
 static void cancel_userptr(struct work_struct *work)
 {
 	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
 	struct drm_i915_gem_object *obj = mo->obj;
 	struct drm_device *dev = obj->base.dev;
 
-	wait_rendering(obj);
+	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
 
 	mutex_lock(&dev->struct_mutex);
 	/* Cancel any active worker and force us to re-evaluate gup */
 	obj->userptr.work = NULL;
 
-	if (obj->pages != NULL) {
-		/* We are inside a kthread context and can't be interrupted */
-		WARN_ON(i915_gem_object_unbind(obj));
-		WARN_ON(i915_gem_object_put_pages(obj));
-	}
+	/* We are inside a kthread context and can't be interrupted */
+	if (i915_gem_object_unbind(obj) == 0)
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+	WARN_ONCE(obj->mm.pages,
+		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+		  obj->bind_count,
+		  atomic_read(&obj->mm.pages_pin_count),
+		  obj->pin_display);
 
 	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -436,24 +429,25 @@ st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
 	return ret;
 }
 
-static int
+static struct sg_table *
 __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
 			     struct page **pvec, int num_pages)
 {
+	struct sg_table *pages;
 	int ret;
 
-	ret = st_set_pages(&obj->pages, pvec, num_pages);
+	ret = st_set_pages(&pages, pvec, num_pages);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
-	ret = i915_gem_gtt_prepare_object(obj);
+	ret = i915_gem_gtt_prepare_pages(obj, pages);
 	if (ret) {
-		sg_free_table(obj->pages);
-		kfree(obj->pages);
-		obj->pages = NULL;
+		sg_free_table(pages);
+		kfree(pages);
+		return ERR_PTR(ret);
 	}
 
-	return ret;
+	return pages;
 }
 
 static int
@@ -497,7 +491,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
 	struct get_pages_work *work = container_of(_work, typeof(*work), work);
 	struct drm_i915_gem_object *obj = work->obj;
-	struct drm_device *dev = obj->base.dev;
 	const int npages = obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	int pinned, ret;
@@ -533,33 +526,32 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&obj->mm.lock);
 	if (obj->userptr.work == &work->work) {
+		struct sg_table *pages = ERR_PTR(ret);
+
 		if (pinned == npages) {
-			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
-			if (ret == 0) {
-				list_add_tail(&obj->global_list,
-					      &to_i915(dev)->mm.unbound_list);
-				obj->get_page.sg = obj->pages->sgl;
-				obj->get_page.last = 0;
+			pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+			if (!IS_ERR(pages)) {
+				__i915_gem_object_set_pages(obj, pages);
 				pinned = 0;
+				pages = NULL;
 			}
 		}
-		obj->userptr.work = ERR_PTR(ret);
-	}
 
-	obj->userptr.workers--;
-	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
+		obj->userptr.work = ERR_CAST(pages);
+	}
+	mutex_unlock(&obj->mm.lock);
 
 	release_pages(pvec, pinned, 0);
 	drm_free_large(pvec);
 
+	i915_gem_object_put(obj);
 	put_task_struct(work->task);
 	kfree(work);
 }
 
-static int
+static struct sg_table *
 __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 				      bool *active)
 {
@@ -584,15 +576,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	 * that error back to this function through
 	 * obj->userptr.work = ERR_PTR.
 	 */
-	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
-		return -EAGAIN;
-
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	obj->userptr.work = &work->work;
-	obj->userptr.workers++;
 
 	work->obj = i915_gem_object_get(obj);
 
@@ -603,14 +591,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	schedule_work(&work->work);
 
 	*active = true;
-	return -EAGAIN;
+	return ERR_PTR(-EAGAIN);
 }
 
-static int
+static struct sg_table *
 i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
 	const int num_pages = obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
+	struct sg_table *pages;
 	int pinned, ret;
 	bool active;
 
@@ -634,15 +623,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	if (obj->userptr.work) {
 		/* active flag should still be held for the pending work */
 		if (IS_ERR(obj->userptr.work))
-			return PTR_ERR(obj->userptr.work);
+			return ERR_CAST(obj->userptr.work);
 		else
-			return -EAGAIN;
+			return ERR_PTR(-EAGAIN);
 	}
 
 	/* Let the mmu-notifier know that we have begun and need cancellation */
 	ret = __i915_gem_userptr_set_active(obj, true);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	pvec = NULL;
 	pinned = 0;
@@ -651,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 				      GFP_TEMPORARY);
 		if (pvec == NULL) {
 			__i915_gem_userptr_set_active(obj, false);
-			return -ENOMEM;
+			return ERR_PTR(-ENOMEM);
 		}
 
 		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -660,21 +649,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
 	active = false;
 	if (pinned < 0)
-		ret = pinned, pinned = 0;
+		pages = ERR_PTR(pinned), pinned = 0;
 	else if (pinned < num_pages)
-		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+		pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
 	else
-		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
-	if (ret) {
+		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+	if (IS_ERR(pages)) {
 		__i915_gem_userptr_set_active(obj, active);
 		release_pages(pvec, pinned, 0);
 	}
 	drm_free_large(pvec);
-	return ret;
+	return pages;
 }
 
 static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+			   struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
@@ -682,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 	BUG_ON(obj->userptr.work != NULL);
 	__i915_gem_userptr_set_active(obj, false);
 
-	if (obj->madv != I915_MADV_WILLNEED)
-		obj->dirty = 0;
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		obj->mm.dirty = false;
 
-	i915_gem_gtt_finish_object(obj);
+	i915_gem_gtt_finish_pages(obj, pages);
 
-	for_each_sgt_page(page, sgt_iter, obj->pages) {
-		if (obj->dirty)
+	for_each_sgt_page(page, sgt_iter, pages) {
+		if (obj->mm.dirty)
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
 		put_page(page);
 	}
-	obj->dirty = 0;
+	obj->mm.dirty = false;
 
-	sg_free_table(obj->pages);
-	kfree(obj->pages);
+	sg_free_table(pages);
+	kfree(pages);
 }
 
 static void
@@ -717,7 +707,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
-	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+		 I915_GEM_OBJECT_IS_SHRINKABLE,
 	.get_pages = i915_gem_userptr_get_pages,
 	.put_pages = i915_gem_userptr_put_pages,
 	.dmabuf_export = i915_gem_userptr_dmabuf_export,
@@ -816,7 +807,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 		ret = drm_gem_handle_create(file, &obj->base, &handle);
 
 	/* drop reference from allocate - handle holds it now */
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	if (ret)
 		return ret;
 
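
Throughout this file the get_pages backends switch from returning int to returning struct sg_table * with the errno encoded in the pointer itself, via ERR_PTR()/ERR_CAST()/IS_ERR(). A userspace rendering of those kernel macros (from include/linux/err.h), showing how one return value carries both the success and failure cases:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095 /* kernel convention: top 4095 addresses are errors */

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline bool IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct sg_table { int nents; };

    /* Hypothetical backend: returns either a table or an errno-in-pointer. */
    static struct sg_table *get_pages(bool fail)
    {
        static struct sg_table st = { .nents = 1 };

        return fail ? ERR_PTR(-EAGAIN) : &st;
    }

    int main(void)
    {
        struct sg_table *pages = get_pages(true);

        if (IS_ERR(pages))
            printf("error: %ld\n", PTR_ERR(pages)); /* -11 (EAGAIN on Linux) */
        return 0;
    }
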
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 334f15d..0dc5d93 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,6 +28,8 @@
  */
 
 #include <generated/utsrelease.h>
+#include <linux/stop_machine.h>
+#include <linux/zlib.h>
 #include "i915_drv.h"
 
 static const char *engine_str(int engine)
@@ -172,6 +174,110 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
 #define err_puts(e, s) i915_error_puts(e, s)
 
+#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+	memset(zstream, 0, sizeof(*zstream));
+
+	zstream->workspace =
+		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+			GFP_ATOMIC | __GFP_NOWARN);
+	if (!zstream->workspace)
+		return false;
+
+	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+		kfree(zstream->workspace);
+		return false;
+	}
+
+	return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+			 void *src,
+			 struct drm_i915_error_object *dst)
+{
+	zstream->next_in = src;
+	zstream->avail_in = PAGE_SIZE;
+
+	do {
+		if (zstream->avail_out == 0) {
+			unsigned long page;
+
+			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+			if (!page)
+				return -ENOMEM;
+
+			dst->pages[dst->page_count++] = (void *)page;
+
+			zstream->next_out = (void *)page;
+			zstream->avail_out = PAGE_SIZE;
+		}
+
+		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+			return -EIO;
+	} while (zstream->avail_in);
+
+	/* Fall back to uncompressed if we increase size? */
+	if (0 && zstream->total_out > zstream->total_in)
+		return -E2BIG;
+
+	return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+			  struct drm_i915_error_object *dst)
+{
+	if (dst) {
+		zlib_deflate(zstream, Z_FINISH);
+		dst->unused = zstream->avail_out;
+	}
+
+	zlib_deflateEnd(zstream);
+	kfree(zstream->workspace);
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+	err_puts(m, ":");
+}
+
+#else
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+	return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+			 void *src,
+			 struct drm_i915_error_object *dst)
+{
+	unsigned long page;
+
+	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+	if (!page)
+		return -ENOMEM;
+
+	dst->pages[dst->page_count++] =
+		memcpy((void *)page, src, PAGE_SIZE);
+
+	return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+			  struct drm_i915_error_object *dst)
+{
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+	err_puts(m, "~");
+}
+
+#endif
+
 static void print_error_buffers(struct drm_i915_error_state_buf *m,
 				const char *name,
 				struct drm_i915_error_buffer *err,
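
When CONFIG_DRM_I915_COMPRESS_ERROR is set, compress_page() above streams each page through zlib with Z_SYNC_FLUSH, attaching a fresh output page whenever avail_out runs dry; otherwise the stubs fall back to a plain page copy. The same flow with userspace zlib, which the kernel's zlib_deflate* wrappers mirror (build with -lz; the demo reuses one output buffer where the patch allocates new pages):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        static unsigned char src[PAGE_SIZE]; /* one "page", all zeroes */
        unsigned char out[PAGE_SIZE];
        z_stream zs;

        memset(&zs, 0, sizeof(zs));
        if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
            return 1;

        zs.next_in = src;
        zs.avail_in = PAGE_SIZE;
        do {
            if (zs.avail_out == 0) { /* attach output space on demand */
                zs.next_out = out;
                zs.avail_out = sizeof(out);
            }
            if (deflate(&zs, Z_SYNC_FLUSH) != Z_OK)
                return 1;
        } while (zs.avail_in);

        printf("%lu -> %lu bytes\n", zs.total_in, zs.total_out);
        deflateEnd(&zs);
        return 0;
    }
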
@@ -228,13 +334,57 @@ static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
 	return "unknown";
 }
 
+static void error_print_instdone(struct drm_i915_error_state_buf *m,
+				 struct drm_i915_error_engine *ee)
+{
+	int slice;
+	int subslice;
+
+	err_printf(m, "  INSTDONE: 0x%08x\n",
+		   ee->instdone.instdone);
+
+	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+		return;
+
+	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
+		   ee->instdone.slice_common);
+
+	if (INTEL_GEN(m->i915) <= 6)
+		return;
+
+	for_each_instdone_slice_subslice(m->i915, slice, subslice)
+		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+			   slice, subslice,
+			   ee->instdone.sampler[slice][subslice]);
+
+	for_each_instdone_slice_subslice(m->i915, slice, subslice)
+		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
+			   slice, subslice,
+			   ee->instdone.row[slice][subslice]);
+}
+
+static void error_print_request(struct drm_i915_error_state_buf *m,
+				const char *prefix,
+				struct drm_i915_error_request *erq)
+{
+	if (!erq->seqno)
+		return;
+
+	err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+		   prefix, erq->pid,
+		   erq->context, erq->seqno,
+		   jiffies_to_msecs(jiffies - erq->jiffies),
+		   erq->head, erq->tail);
+}
+
 static void error_print_engine(struct drm_i915_error_state_buf *m,
 			       struct drm_i915_error_engine *ee)
 {
 	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
 	err_printf(m, "  START: 0x%08x\n", ee->start);
-	err_printf(m, "  HEAD:  0x%08x\n", ee->head);
-	err_printf(m, "  TAIL:  0x%08x\n", ee->tail);
+	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
+	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
+		   ee->tail, ee->rq_post, ee->rq_tail);
 	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
 	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
 	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
@@ -242,7 +392,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 		   (u32)(ee->acthd>>32), (u32)ee->acthd);
 	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
 	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
-	err_printf(m, "  INSTDONE: 0x%08x\n", ee->instdone);
+
+	error_print_instdone(m, ee);
+
 	if (ee->batchbuffer) {
 		u64 start = ee->batchbuffer->gtt_offset;
 		u64 end = start + ee->batchbuffer->gtt_size;
@@ -263,17 +415,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 	if (INTEL_GEN(m->i915) >= 6) {
 		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
 		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
-		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-			   ee->semaphore_mboxes[0],
-			   ee->semaphore_seqno[0]);
-		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-			   ee->semaphore_mboxes[1],
-			   ee->semaphore_seqno[1]);
-		if (HAS_VEBOX(m->i915)) {
-			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
-				   ee->semaphore_mboxes[2],
-				   ee->semaphore_seqno[2]);
-		}
+		err_printf(m, "  SYNC_0: 0x%08x\n",
+			   ee->semaphore_mboxes[0]);
+		err_printf(m, "  SYNC_1: 0x%08x\n",
+			   ee->semaphore_mboxes[1]);
+		if (HAS_VEBOX(m->i915))
+			err_printf(m, "  SYNC_2: 0x%08x\n",
+				   ee->semaphore_mboxes[2]);
 	}
 	if (USES_PPGTT(m->i915)) {
 		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -296,6 +444,8 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 	err_printf(m, "  hangcheck: %s [%d]\n",
 		   hangcheck_action_to_str(ee->hangcheck_action),
 		   ee->hangcheck_score);
+	error_print_request(m, "  ELSP[0]: ", &ee->execlist[0]);
+	error_print_request(m, "  ELSP[1]: ", &ee->execlist[1]);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -307,28 +457,72 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
 	va_end(args);
 }
 
+static int
+ascii85_encode_len(int len)
+{
+	return DIV_ROUND_UP(len, 4);
+}
+
+static bool
+ascii85_encode(u32 in, char *out)
+{
+	int i;
+
+	if (in == 0)
+		return false;
+
+	out[5] = '\0';
+	for (i = 5; i--; ) {
+		out[i] = '!' + in % 85;
+		in /= 85;
+	}
+
+	return true;
+}
+
 static void print_error_obj(struct drm_i915_error_state_buf *m,
+			    struct intel_engine_cs *engine,
+			    const char *name,
 			    struct drm_i915_error_object *obj)
 {
-	int page, offset, elt;
+	char out[6];
+	int page;
 
-	for (page = offset = 0; page < obj->page_count; page++) {
-		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-			err_printf(m, "%08x :  %08x\n", offset,
-				   obj->pages[page][elt]);
-			offset += 4;
+	if (!obj)
+		return;
+
+	if (name) {
+		err_printf(m, "%s --- %s = 0x%08x %08x\n",
+			   engine ? engine->name : "global", name,
+			   upper_32_bits(obj->gtt_offset),
+			   lower_32_bits(obj->gtt_offset));
+	}
+
+	err_compression_marker(m);
+	for (page = 0; page < obj->page_count; page++) {
+		int i, len;
+
+		len = PAGE_SIZE;
+		if (page == obj->page_count - 1)
+			len -= obj->unused;
+		len = ascii85_encode_len(len);
+
+		for (i = 0; i < len; i++) {
+			if (ascii85_encode(obj->pages[page][i], out))
+				err_puts(m, out);
+			else
+				err_puts(m, "z");
 		}
 	}
+	err_puts(m, "\n");
 }
 
 static void err_print_capabilities(struct drm_i915_error_state_buf *m,
 				   const struct intel_device_info *info)
 {
 #define PRINT_FLAG(x)  err_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
-	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 #undef PRINT_FLAG
-#undef SEP_SEMICOLON
 }
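
print_error_obj() above replaces the old hex word dump with Ascii85: each 32-bit word becomes five characters in '!'..'u', and a zero word is emitted as the standard "z" shorthand, which keeps the (very common) zero-filled pages compact. A runnable copy of the encoder:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool ascii85_encode(uint32_t in, char out[6])
    {
        if (in == 0)
            return false; /* caller emits "z" for an all-zero word */

        out[5] = '\0';
        for (int i = 5; i--; ) {
            out[i] = '!' + in % 85;
            in /= 85;
        }
        return true;
    }

    int main(void)
    {
        uint32_t words[] = { 0xdeadbeef, 0 };
        char out[6];

        for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
            if (ascii85_encode(words[i], out))
                fputs(out, stdout);
            else
                fputs("z", stdout);
        }
        putchar('\n');
        return 0;
    }
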
 
 int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
@@ -339,8 +533,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_i915_error_state *error = error_priv->error;
 	struct drm_i915_error_object *obj;
-	int i, j, offset, elt;
 	int max_hangcheck_score;
+	int i, j;
 
 	if (!error) {
 		err_printf(m, "no error state collected\n");
@@ -348,9 +542,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	}
 
 	err_printf(m, "%s\n", error->error_msg);
-	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
-		   error->time.tv_usec);
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
+	err_printf(m, "Time: %ld s %ld us\n",
+		   error->time.tv_sec, error->time.tv_usec);
+	err_printf(m, "Boottime: %ld s %ld us\n",
+		   error->boottime.tv_sec, error->boottime.tv_usec);
+	err_printf(m, "Uptime: %ld s %ld us\n",
+		   error->uptime.tv_sec, error->uptime.tv_usec);
 	err_print_capabilities(m, &error->device_info);
 	max_hangcheck_score = 0;
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -391,7 +589,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		for (i = 0; i < 4; i++)
 			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
 				   error->gtier[i]);
-	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+	} else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
 		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
 	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
@@ -402,10 +600,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
 		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
-	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
-		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
-			   error->extra_instdone[i]);
-
 	if (INTEL_INFO(dev)->gen >= 6) {
 		err_printf(m, "ERROR: 0x%08x\n", error->error);
 
@@ -416,7 +610,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
-	if (IS_GEN7(dev))
+	if (IS_GEN7(dev_priv))
 		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -438,7 +632,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 			len += scnprintf(buf + len, sizeof(buf), "%s%s",
 					 first ? "" : ", ",
-					 dev_priv->engine[j].name);
+					 dev_priv->engine[j]->name);
 			first = 0;
 		}
 		scnprintf(buf + len, sizeof(buf), ")");
@@ -456,7 +650,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 		obj = ee->batchbuffer;
 		if (obj) {
-			err_puts(m, dev_priv->engine[i].name);
+			err_puts(m, dev_priv->engine[i]->name);
 			if (ee->pid != -1)
 				err_printf(m, " (submitted by %s [%d])",
 					   ee->comm,
@@ -464,37 +658,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 				   upper_32_bits(obj->gtt_offset),
 				   lower_32_bits(obj->gtt_offset));
-			print_error_obj(m, obj);
-		}
-
-		obj = ee->wa_batchbuffer;
-		if (obj) {
-			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
-				   dev_priv->engine[i].name,
-				   lower_32_bits(obj->gtt_offset));
-			print_error_obj(m, obj);
+			print_error_obj(m, dev_priv->engine[i], NULL, obj);
 		}
 
 		if (ee->num_requests) {
 			err_printf(m, "%s --- %d requests\n",
-				   dev_priv->engine[i].name,
+				   dev_priv->engine[i]->name,
 				   ee->num_requests);
-			for (j = 0; j < ee->num_requests; j++) {
-				err_printf(m, "  pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
-					   ee->requests[j].pid,
-					   ee->requests[j].seqno,
-					   ee->requests[j].jiffies,
-					   ee->requests[j].head,
-					   ee->requests[j].tail);
-			}
+			for (j = 0; j < ee->num_requests; j++)
+				error_print_request(m, " ", &ee->requests[j]);
 		}
 
 		if (IS_ERR(ee->waiters)) {
 			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
-				   dev_priv->engine[i].name);
+				   dev_priv->engine[i]->name);
 		} else if (ee->num_waiters) {
 			err_printf(m, "%s --- %d waiters\n",
-				   dev_priv->engine[i].name,
+				   dev_priv->engine[i]->name,
 				   ee->num_waiters);
 			for (j = 0; j < ee->num_waiters; j++) {
 				err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -504,77 +684,25 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			}
 		}
 
-		if ((obj = ee->ringbuffer)) {
-			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-				   dev_priv->engine[i].name,
-				   lower_32_bits(obj->gtt_offset));
-			print_error_obj(m, obj);
-		}
+		print_error_obj(m, dev_priv->engine[i],
+				"ringbuffer", ee->ringbuffer);
 
-		if ((obj = ee->hws_page)) {
-			u64 hws_offset = obj->gtt_offset;
-			u32 *hws_page = &obj->pages[0][0];
+		print_error_obj(m, dev_priv->engine[i],
+				"HW Status", ee->hws_page);
 
-			if (i915.enable_execlists) {
-				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
-				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
-			}
-			err_printf(m, "%s --- HW Status = 0x%08llx\n",
-				   dev_priv->engine[i].name, hws_offset);
-			offset = 0;
-			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-					   offset,
-					   hws_page[elt],
-					   hws_page[elt+1],
-					   hws_page[elt+2],
-					   hws_page[elt+3]);
-				offset += 16;
-			}
-		}
+		print_error_obj(m, dev_priv->engine[i],
+				"HW context", ee->ctx);
 
-		obj = ee->wa_ctx;
-		if (obj) {
-			u64 wa_ctx_offset = obj->gtt_offset;
-			u32 *wa_ctx_page = &obj->pages[0][0];
-			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
-			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
-					   engine->wa_ctx.per_ctx.size);
+		print_error_obj(m, dev_priv->engine[i],
+				"WA context", ee->wa_ctx);
 
-			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
-				   dev_priv->engine[i].name, wa_ctx_offset);
-			offset = 0;
-			for (elt = 0; elt < wa_ctx_size; elt += 4) {
-				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-					   offset,
-					   wa_ctx_page[elt + 0],
-					   wa_ctx_page[elt + 1],
-					   wa_ctx_page[elt + 2],
-					   wa_ctx_page[elt + 3]);
-				offset += 16;
-			}
-		}
-
-		if ((obj = ee->ctx)) {
-			err_printf(m, "%s --- HW Context = 0x%08x\n",
-				   dev_priv->engine[i].name,
-				   lower_32_bits(obj->gtt_offset));
-			print_error_obj(m, obj);
-		}
+		print_error_obj(m, dev_priv->engine[i],
+				"WA batchbuffer", ee->wa_batchbuffer);
 	}
 
-	if ((obj = error->semaphore)) {
-		err_printf(m, "Semaphore page = 0x%08x\n",
-			   lower_32_bits(obj->gtt_offset));
-		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-				   elt * 4,
-				   obj->pages[0][elt],
-				   obj->pages[0][elt+1],
-				   obj->pages[0][elt+2],
-				   obj->pages[0][elt+3]);
-		}
-	}
+	print_error_obj(m, NULL, "Semaphores", error->semaphore);
+
+	print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
 
 	if (error->overlay)
 		intel_overlay_print_error_state(m, error->overlay);
@@ -629,7 +757,7 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
 		return;
 
 	for (page = 0; page < obj->page_count; page++)
-		kfree(obj->pages[page]);
+		free_page((unsigned long)obj->pages[page]);
 
 	kfree(obj);
 }
@@ -656,6 +784,7 @@ static void i915_error_state_free(struct kref *error_ref)
 	}
 
 	i915_error_object_free(error->semaphore);
+	i915_error_object_free(error->guc_log);
 
 	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
 		kfree(error->active_bo[i]);
@@ -667,104 +796,63 @@ static void i915_error_state_free(struct kref *error_ref)
 }
 
 static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *dev_priv,
+i915_error_object_create(struct drm_i915_private *i915,
 			 struct i915_vma *vma)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_i915_gem_object *src;
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	const u64 slot = ggtt->error_capture.start;
 	struct drm_i915_error_object *dst;
-	int num_pages;
-	bool use_ggtt;
-	int i = 0;
-	u64 reloc_offset;
+	struct z_stream_s zstream;
+	unsigned long num_pages;
+	struct sgt_iter iter;
+	dma_addr_t dma;
 
 	if (!vma)
 		return NULL;
 
-	src = vma->obj;
-	if (!src->pages)
-		return NULL;
-
-	num_pages = src->base.size >> PAGE_SHIFT;
-
-	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
+	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
+	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
+		      GFP_ATOMIC | __GFP_NOWARN);
 	if (!dst)
 		return NULL;
 
 	dst->gtt_offset = vma->node.start;
 	dst->gtt_size = vma->node.size;
+	dst->page_count = 0;
+	dst->unused = 0;
 
-	reloc_offset = dst->gtt_offset;
-	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-		   (vma->flags & I915_VMA_GLOBAL_BIND) &&
-		   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
-
-	/* Cannot access stolen address directly, try to use the aperture */
-	if (src->stolen) {
-		use_ggtt = true;
-
-		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
-			goto unwind;
-
-		reloc_offset = vma->node.start;
-		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
-			goto unwind;
+	if (!compress_init(&zstream)) {
+		kfree(dst);
+		return NULL;
 	}
 
-	/* Cannot access snooped pages through the aperture */
-	if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
-	    !HAS_LLC(dev_priv))
-		goto unwind;
+	for_each_sgt_dma(dma, iter, vma->pages) {
+		void __iomem *s;
+		int ret;
 
-	dst->page_count = num_pages;
-	while (num_pages--) {
-		unsigned long flags;
-		void *d;
+		ggtt->base.insert_page(&ggtt->base, dma, slot,
+				       I915_CACHE_NONE, 0);
 
-		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
-		if (d == NULL)
+		s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+		ret = compress_page(&zstream, (void  __force *)s, dst);
+		io_mapping_unmap_atomic(s);
+
+		if (ret)
 			goto unwind;
-
-		local_irq_save(flags);
-		if (use_ggtt) {
-			void __iomem *s;
-
-			/* Simply ignore tiling or any overlapping fence.
-			 * It's part of the error state, and this hopefully
-			 * captures what the GPU read.
-			 */
-
-			s = io_mapping_map_atomic_wc(&ggtt->mappable,
-						     reloc_offset);
-			memcpy_fromio(d, s, PAGE_SIZE);
-			io_mapping_unmap_atomic(s);
-		} else {
-			struct page *page;
-			void *s;
-
-			page = i915_gem_object_get_page(src, i);
-
-			drm_clflush_pages(&page, 1);
-
-			s = kmap_atomic(page);
-			memcpy(d, s, PAGE_SIZE);
-			kunmap_atomic(s);
-
-			drm_clflush_pages(&page, 1);
-		}
-		local_irq_restore(flags);
-
-		dst->pages[i++] = d;
-		reloc_offset += PAGE_SIZE;
 	}
-
-	return dst;
+	goto out;
 
 unwind:
-	while (i--)
-		kfree(dst->pages[i]);
+	while (dst->page_count--)
+		free_page((unsigned long)dst->pages[dst->page_count]);
 	kfree(dst);
-	return NULL;
+	dst = NULL;
+
+out:
+	compress_fini(&zstream, dst);
+	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+	return dst;
 }
 
 /* The error capture is special as tries to run underneath the normal
@@ -773,16 +861,19 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 static inline uint32_t
 __active_get_seqno(struct i915_gem_active *active)
 {
-	return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+	struct drm_i915_gem_request *request;
+
+	request = __i915_gem_active_peek(active);
+	return request ? request->global_seqno : 0;
 }
 
 static inline int
 __active_get_engine_id(struct i915_gem_active *active)
 {
-	struct intel_engine_cs *engine;
+	struct drm_i915_gem_request *request;
 
-	engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
-	return engine ? engine->id : -1;
+	request = __i915_gem_active_peek(active);
+	return request ? request->engine->id : -1;
 }
 
 static void capture_bo(struct drm_i915_error_buffer *err,
@@ -795,17 +886,17 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->name = obj->base.name;
 
 	for (i = 0; i < I915_NUM_ENGINES; i++)
-		err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
-	err->wseqno = __active_get_seqno(&obj->last_write);
-	err->engine = __active_get_engine_id(&obj->last_write);
+		err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
+	err->wseqno = __active_get_seqno(&vma->last_write);
+	err->engine = __active_get_engine_id(&vma->last_write);
 
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
 	err->fence_reg = vma->fence ? vma->fence->id : -1;
 	err->tiling = i915_gem_object_get_tiling(obj);
-	err->dirty = obj->dirty;
-	err->purgeable = obj->madv != I915_MADV_WILLNEED;
+	err->dirty = obj->mm.dirty;
+	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
 	err->cache_level = obj->cache_level;
 }
@@ -855,7 +946,8 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 			if (engine_id)
 				*engine_id = i;
 
-			return error->engine[i].ipehr ^ error->engine[i].instdone;
+			return error->engine[i].ipehr ^
+			       error->engine[i].instdone.instdone;
 		}
 	}
 
@@ -879,6 +971,26 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
 	}
 }
 
+static inline u32
+gen8_engine_sync_index(struct intel_engine_cs *engine,
+		       struct intel_engine_cs *other)
+{
+	int idx;
+
+	/*
+	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
+	 */
+
+	idx = (other - engine) - 1;
+	if (idx < 0)
+		idx += I915_NUM_ENGINES;
+
+	return idx;
+}
 
 static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
 					struct intel_engine_cs *engine,
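
gen8_engine_sync_index() condenses the table in its comment into arithmetic: from each engine's point of view, the other engines appear in ring order starting at its successor. A quick check that the formula reproduces the comment (engine names ordered as there):

    #include <stdio.h>

    #define NUM_ENGINES 5
    static const char *name[NUM_ENGINES] = {
        "rcs", "vcs", "bcs", "vecs", "vcs2"
    };

    int main(void)
    {
        for (int engine = 0; engine < NUM_ENGINES; engine++) {
            printf("%-4s ->", name[engine]);
            for (int other = 0; other < NUM_ENGINES; other++) {
                int idx;

                if (other == engine)
                    continue;
                idx = (other - engine) - 1;
                if (idx < 0)
                    idx += NUM_ENGINES;
                printf(" %d = %s,", idx, name[other]);
            }
            printf("\n");
        }
        return 0;
    }
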
@@ -891,7 +1003,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
 	if (!error->semaphore)
 		return;
 
-	for_each_engine_id(to, dev_priv, id) {
+	for_each_engine(to, dev_priv, id) {
 		int idx;
 		u16 signal_offset;
 		u32 *tmp;
@@ -902,10 +1014,9 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
 		signal_offset =
 			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
 		tmp = error->semaphore->pages[0];
-		idx = intel_engine_sync_index(engine, to);
+		idx = gen8_engine_sync_index(engine, to);
 
 		ee->semaphore_mboxes[idx] = tmp[signal_offset];
-		ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
 	}
 }
 
@@ -916,14 +1027,9 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
 
 	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
 	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
-	ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
-	ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
-
-	if (HAS_VEBOX(dev_priv)) {
+	if (HAS_VEBOX(dev_priv))
 		ee->semaphore_mboxes[2] =
 			I915_READ(RING_SYNC_2(engine->mmio_base));
-		ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
-	}
 }
 
 static void error_record_engine_waiters(struct intel_engine_cs *engine,
@@ -940,7 +1046,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
 	if (RB_EMPTY_ROOT(&b->waiters))
 		return;
 
-	if (!spin_trylock(&b->lock)) {
+	if (!spin_trylock_irq(&b->lock)) {
 		ee->waiters = ERR_PTR(-EDEADLK);
 		return;
 	}
@@ -948,7 +1054,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
 	count = 0;
 	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
 		count++;
-	spin_unlock(&b->lock);
+	spin_unlock_irq(&b->lock);
 
 	waiter = NULL;
 	if (count)
@@ -958,7 +1064,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
 	if (!waiter)
 		return;
 
-	if (!spin_trylock(&b->lock)) {
+	if (!spin_trylock_irq(&b->lock)) {
 		kfree(waiter);
 		ee->waiters = ERR_PTR(-EDEADLK);
 		return;
@@ -976,7 +1082,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
 		if (++ee->num_waiters == count)
 			break;
 	}
-	spin_unlock(&b->lock);
+	spin_unlock_irq(&b->lock);
 }
 
 static void error_record_engine_registers(struct drm_i915_error_state *error,
@@ -998,7 +1104,6 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
 		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
 		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-		ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
 		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
 		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
 		if (INTEL_GEN(dev_priv) >= 8) {
@@ -1010,14 +1115,15 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 		ee->faddr = I915_READ(DMA_FADD_I8XX);
 		ee->ipeir = I915_READ(IPEIR);
 		ee->ipehr = I915_READ(IPEHR);
-		ee->instdone = I915_READ(GEN2_INSTDONE);
 	}
 
+	intel_engine_get_instdone(engine, &ee->instdone);
+
 	ee->waiting = intel_engine_has_waiter(engine);
 	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
 	ee->acthd = intel_engine_get_active_head(engine);
 	ee->seqno = intel_engine_get_seqno(engine);
-	ee->last_seqno = engine->last_submitted_seqno;
+	ee->last_seqno = intel_engine_last_submit(engine);
 	ee->start = I915_READ_START(engine);
 	ee->head = I915_READ_HEAD(engine);
 	ee->tail = I915_READ_TAIL(engine);
@@ -1079,6 +1185,20 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 	}
 }
 
+static void record_request(struct drm_i915_gem_request *request,
+			   struct drm_i915_error_request *erq)
+{
+	erq->context = request->ctx->hw_id;
+	erq->seqno = request->global_seqno;
+	erq->jiffies = request->emitted_jiffies;
+	erq->head = request->head;
+	erq->tail = request->tail;
+
+	rcu_read_lock();
+	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+	rcu_read_unlock();
+}
+
 static void engine_record_requests(struct intel_engine_cs *engine,
 				   struct drm_i915_gem_request *first,
 				   struct drm_i915_error_engine *ee)
@@ -1088,7 +1208,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->request_list, link)
+	list_for_each_entry_from(request, &engine->timeline->requests, link)
 		count++;
 	if (!count)
 		return;
@@ -1101,9 +1221,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->request_list, link) {
-		struct drm_i915_error_request *erq;
-
+	list_for_each_entry_from(request, &engine->timeline->requests, link) {
 		if (count >= ee->num_requests) {
 			/*
 			 * If the ring request list was changed in
@@ -1123,19 +1241,22 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 			break;
 		}
 
-		erq = &ee->requests[count++];
-		erq->seqno = request->fence.seqno;
-		erq->jiffies = request->emitted_jiffies;
-		erq->head = request->head;
-		erq->tail = request->tail;
-
-		rcu_read_lock();
-		erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
-		rcu_read_unlock();
+		record_request(request, &ee->requests[count++]);
 	}
 	ee->num_requests = count;
 }
 
+static void error_record_engine_execlists(struct intel_engine_cs *engine,
+					  struct drm_i915_error_engine *ee)
+{
+	unsigned int n;
+
+	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+		if (engine->execlist_port[n].request)
+			record_request(engine->execlist_port[n].request,
+				       &ee->execlist[n]);
+}
+
 static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				  struct drm_i915_error_state *error)
 {
@@ -1146,20 +1267,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		i915_error_object_create(dev_priv, dev_priv->semaphore);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct intel_engine_cs *engine = &dev_priv->engine[i];
+		struct intel_engine_cs *engine = dev_priv->engine[i];
 		struct drm_i915_error_engine *ee = &error->engine[i];
 		struct drm_i915_gem_request *request;
 
 		ee->pid = -1;
 		ee->engine_id = -1;
 
-		if (!intel_engine_initialized(engine))
+		if (!engine)
 			continue;
 
 		ee->engine_id = i;
 
 		error_record_engine_registers(error, engine, ee);
 		error_record_engine_waiters(engine, ee);
+		error_record_engine_execlists(engine, ee);
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
@@ -1202,6 +1324,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			error->simulated |=
 				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
+			ee->rq_head = request->head;
+			ee->rq_post = request->postfix;
+			ee->rq_tail = request->tail;
+
 			ring = request->ring;
 			ee->cpu_ring_head = ring->head;
 			ee->cpu_ring_tail = ring->tail;
@@ -1302,6 +1428,17 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
 	error->pinned_bo = bo;
 }
 
+static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
+					    struct drm_i915_error_state *error)
+{
+	/* Capturing log buf contents won't be useful if logging was disabled */
+	if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
+		return;
+
+	error->guc_log = i915_error_object_create(dev_priv,
+						  dev_priv->guc.log.vma);
+}
+
 /* Capture all registers which don't fit into another category. */
 static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
@@ -1318,13 +1455,13 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 	 */
 
 	/* 1: Registers specific to a single generation */
-	if (IS_VALLEYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv)) {
 		error->gtier[0] = I915_READ(GTIER);
 		error->ier = I915_READ(VLV_IER);
 		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
 	}
 
-	if (IS_GEN7(dev))
+	if (IS_GEN7(dev_priv))
 		error->err_int = I915_READ(GEN7_ERR_INT);
 
 	if (INTEL_INFO(dev)->gen >= 8) {
@@ -1332,7 +1469,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
 	}
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev_priv)) {
 		error->forcewake = I915_READ_FW(FORCEWAKE);
 		error->gab_ctl = I915_READ(GAB_CTL);
 		error->gfx_mode = I915_READ(GFX_MODE);
@@ -1349,7 +1486,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 	}
 
 	/* 3: Feature specific registers */
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
 		error->gam_ecochk = I915_READ(GAM_ECOCHK);
 		error->gac_eco = I915_READ(GAC_ECO_BITS);
 	}
@@ -1362,18 +1499,16 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 		error->ier = I915_READ(GEN8_DE_MISC_IER);
 		for (i = 0; i < 4; i++)
 			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		error->ier = I915_READ(DEIER);
 		error->gtier[0] = I915_READ(GTIER);
-	} else if (IS_GEN2(dev)) {
+	} else if (IS_GEN2(dev_priv)) {
 		error->ier = I915_READ16(IER);
-	} else if (!IS_VALLEYVIEW(dev)) {
+	} else if (!IS_VALLEYVIEW(dev_priv)) {
 		error->ier = I915_READ(IER);
 	}
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
-
-	i915_get_extra_instdone(dev_priv, error->extra_instdone);
 }
 
 static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
@@ -1418,6 +1553,32 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 	       sizeof(error->device_info));
 }
 
+static int capture(void *data)
+{
+	struct drm_i915_error_state *error = data;
+
+	i915_capture_gen_state(error->i915, error);
+	i915_capture_reg_state(error->i915, error);
+	i915_gem_record_fences(error->i915, error);
+	i915_gem_record_rings(error->i915, error);
+	i915_capture_active_buffers(error->i915, error);
+	i915_capture_pinned_buffers(error->i915, error);
+	i915_gem_capture_guc_log_buffer(error->i915, error);
+
+	do_gettimeofday(&error->time);
+	error->boottime = ktime_to_timeval(ktime_get_boottime());
+	error->uptime =
+		ktime_to_timeval(ktime_sub(ktime_get(),
+					   error->i915->gt.last_init_time));
+
+	error->overlay = intel_overlay_capture_error_state(error->i915);
+	error->display = intel_display_capture_error_state(error->i915);
+
+	return 0;
+}
+
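+/*
+ * Note: capture() is invoked via stop_machine() (see
+ * i915_capture_error_state() below), so the register state and the
+ * buffer contents are snapshotted while the rest of the machine is
+ * quiesced.
+ */
+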
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
+ * @dev_priv: i915 device private
@@ -1435,6 +1596,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
 	struct drm_i915_error_state *error;
 	unsigned long flags;
 
+	if (!i915.error_capture)
+		return;
+
 	if (READ_ONCE(dev_priv->gpu_error.first_error))
 		return;
 
@@ -1446,18 +1610,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
 	}
 
 	kref_init(&error->ref);
+	error->i915 = dev_priv;
 
-	i915_capture_gen_state(dev_priv, error);
-	i915_capture_reg_state(dev_priv, error);
-	i915_gem_record_fences(dev_priv, error);
-	i915_gem_record_rings(dev_priv, error);
-	i915_capture_active_buffers(dev_priv, error);
-	i915_capture_pinned_buffers(dev_priv, error);
-
-	do_gettimeofday(&error->time);
-
-	error->overlay = intel_overlay_capture_error_state(dev_priv);
-	error->display = intel_display_capture_error_state(dev_priv);
+	stop_machine(capture, error, NULL);
 
 	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
 	DRM_INFO("%s\n", error->error_msg);
@@ -1476,7 +1631,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	if (!warned) {
+	if (!warned &&
+	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
 		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
 		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
 		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
@@ -1497,7 +1653,6 @@ void i915_error_state_get(struct drm_device *dev,
 	if (error_priv->error)
 		kref_get(&error_priv->error->ref);
 	spin_unlock_irq(&dev_priv->gpu_error.lock);
-
 }
 
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
@@ -1519,33 +1674,3 @@ void i915_destroy_error_state(struct drm_device *dev)
 	if (error)
 		kref_put(&error->ref, i915_error_state_free);
 }
-
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
-{
-	switch (type) {
-	case I915_CACHE_NONE: return " uncached";
-	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
-	case I915_CACHE_L3_LLC: return " L3+LLC";
-	case I915_CACHE_WT: return " WT";
-	default: return "";
-	}
-}
-
-/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
-			     uint32_t *instdone)
-{
-	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
-	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
-		instdone[0] = I915_READ(GEN2_INSTDONE);
-	else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
-		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
-		instdone[1] = I915_READ(GEN4_INSTDONE1);
-	} else if (INTEL_GEN(dev_priv) >= 7) {
-		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
-		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
-		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
-		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
-	}
-}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 3106dcc..666dab7 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -23,6 +23,8 @@
  */
 #include <linux/firmware.h>
 #include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
 #include "i915_drv.h"
 #include "intel_guc.h"
 
@@ -85,6 +87,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
 	if (WARN_ON(len < 1 || len > 15))
 		return -EINVAL;
 
+	mutex_lock(&guc->action_lock);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 	dev_priv->guc.action_count += 1;
@@ -123,6 +126,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
 	dev_priv->guc.action_status = status;
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	mutex_unlock(&guc->action_lock);
 
 	return ret;
 }
@@ -170,6 +174,35 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
 
+static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
+{
+	u32 data[1];
+
+	data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;
+
+	return host2guc_action(guc, data, 1);
+}
+
+static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
+{
+	u32 data[2];
+
+	data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
+	data[1] = 0;
+
+	return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
+{
+	u32 data[2];
+
+	data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
+	data[1] = control_val;
+
+	return host2guc_action(guc, data, 2);
+}
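+
+/*
+ * All the host2guc wrappers above follow the same pattern: data[0]
+ * carries the HOST2GUC action code and any following dwords carry its
+ * parameters. An illustrative (not from this patch) invocation:
+ *
+ *	u32 data[2] = { HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING, control_val };
+ *
+ *	host2guc_action(guc, data, ARRAY_SIZE(data));
+ */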
+
 /*
  * Initialise, update, or clear doorbell data shared with the GuC
  *
@@ -187,7 +220,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
 	struct guc_context_desc desc;
 	size_t len;
 
-	doorbell = client->client_base + client->doorbell_offset;
+	doorbell = client->vaddr + client->doorbell_offset;
 
 	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
 	    test_bit(client->doorbell_id, doorbell_bitmap)) {
@@ -293,7 +326,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
 {
 	struct guc_process_desc *desc;
 
-	desc = client->client_base + client->proc_desc_offset;
+	desc = client->vaddr + client->proc_desc_offset;
 
 	memset(desc, 0, sizeof(*desc));
 
@@ -380,8 +413,8 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
 	gfx_addr = i915_ggtt_offset(client->vma);
 	desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
 				client->doorbell_offset;
-	desc.db_trigger_cpu = (uintptr_t)client->client_base +
-				client->doorbell_offset;
+	desc.db_trigger_cpu =
+		(uintptr_t)client->vaddr + client->doorbell_offset;
 	desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
 	desc.process_desc = gfx_addr + client->proc_desc_offset;
 	desc.wq_addr = gfx_addr + client->wq_offset;
@@ -432,7 +465,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
 {
 	const size_t wqi_size = sizeof(struct guc_wq_item);
 	struct i915_guc_client *gc = request->i915->guc.execbuf_client;
-	struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
+	struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
 	u32 freespace;
 	int ret;
 
@@ -473,10 +506,9 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
 	struct intel_engine_cs *engine = rq->engine;
 	struct guc_process_desc *desc;
 	struct guc_wq_item *wqi;
-	void *base;
-	u32 freespace, tail, wq_off, wq_page;
+	u32 freespace, tail, wq_off;
 
-	desc = gc->client_base + gc->proc_desc_offset;
+	desc = gc->vaddr + gc->proc_desc_offset;
 
 	/* Free space is guaranteed, see i915_guc_wq_reserve() above */
 	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
@@ -506,10 +538,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
 	gc->wq_rsvd -= wqi_size;
 
 	/* WQ starts from the page after doorbell / process_desc */
-	wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
-	wq_off &= PAGE_SIZE - 1;
-	base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
-	wqi = (struct guc_wq_item *)((char *)base + wq_off);
+	wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
 
 	/* Now fill in the 4-word work queue item */
 	wqi->header = WQ_TYPE_INORDER |
@@ -521,9 +550,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
 	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
 
 	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
-	wqi->fence_id = rq->fence.seqno;
-
-	kunmap_atomic(base);
+	wqi->fence_id = rq->global_seqno;
 }
 
 static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -533,7 +560,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 	union guc_doorbell_qw *db;
 	int attempt = 2, ret = -EAGAIN;
 
-	desc = gc->client_base + gc->proc_desc_offset;
+	desc = gc->vaddr + gc->proc_desc_offset;
 
 	/* Update the tail so it is visible to GuC */
 	desc->tail = gc->wq_tail;
@@ -549,7 +576,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 		db_exc.cookie = 1;
 
 	/* pointer of current doorbell cacheline */
-	db = gc->client_base + gc->doorbell_offset;
+	db = gc->vaddr + gc->doorbell_offset;
 
 	while (attempt--) {
 		/* lets ring the doorbell */
@@ -601,6 +628,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  */
 static void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
+	struct drm_i915_private *dev_priv = rq->i915;
 	unsigned int engine_id = rq->engine->id;
 	struct intel_guc *guc = &rq->i915->guc;
 	struct i915_guc_client *client = guc->execbuf_client;
@@ -608,6 +636,11 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 
 	spin_lock(&client->wq_lock);
 	guc_wq_item_append(client, rq);
+
+	/* WA to flush out the pending GMADR writes to the ring buffer. */
+	if (i915_vma_is_map_and_fenceable(rq->ring->vma))
+		POSTING_READ_FW(GUC_STATUS);
+
 	b_ret = guc_ring_doorbell(client);
 
 	client->submissions[engine_id] += 1;
@@ -616,7 +649,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 		client->b_fail += 1;
 
 	guc->submissions[engine_id] += 1;
-	guc->last_seqno[engine_id] = rq->fence.seqno;
+	guc->last_seqno[engine_id] = rq->global_seqno;
 	spin_unlock(&client->wq_lock);
 }
 
@@ -685,14 +718,14 @@ guc_client_free(struct drm_i915_private *dev_priv,
 	 * Be sure to drop any locks
 	 */
 
-	if (client->client_base) {
+	if (client->vaddr) {
 		/*
 		 * If we got as far as setting up a doorbell, make sure we
 		 * shut it down before unmapping & deallocating the memory.
 		 */
 		guc_disable_doorbell(guc, client);
 
-		kunmap(kmap_to_page(client->client_base));
+		i915_gem_object_unpin_map(client->vma->obj);
 	}
 
 	i915_vma_unpin_and_release(&client->vma);
@@ -781,6 +814,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	struct i915_guc_client *client;
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_vma *vma;
+	void *vaddr;
 	uint16_t db_id;
 
 	client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -807,7 +841,12 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 
-	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
+	/* Keep the whole client object pinned and mapped until it is freed. */
 	client->vma = vma;
-	client->client_base = kmap(i915_vma_first_page(vma));
+
+	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+	if (IS_ERR(vaddr))
+		goto err;
+
+	client->vaddr = vaddr;
 
 	spin_lock_init(&client->wq_lock);
 	client->wq_offset = GUC_DB_SIZE;
@@ -847,15 +886,411 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	return NULL;
 }
 
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer; relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+				 void *subbuf,
+				 void *prev_subbuf,
+				 size_t prev_padding)
+{
+	/* Use no-overwrite mode by default, where relay will stop accepting
+	 * new data if there are no empty sub buffers left.
+	 * There is no strict synchronization enforced by relay between Consumer
+	 * and Producer. In overwrite mode, there is a possibility of getting
+	 * inconsistent/garbled data, as the Producer could be writing to the
+	 * same sub buffer from which the Consumer is reading. This can't be
+	 * avoided unless the Consumer is fast enough and can always run in
+	 * tandem with the Producer.
+	 */
+	if (relay_buf_full(buf))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+					       struct dentry *parent,
+					       umode_t mode,
+					       struct rchan_buf *buf,
+					       int *is_global)
+{
+	struct dentry *buf_file;
+
+	/* This is to enable the use of a single buffer for the relay channel
+	 * and correspondingly have a single file exposed to the User, through
+	 * which it can collect the logs in order without any post-processing.
+	 * Need to set 'is_global' even if parent is NULL for early logging.
+	 */
+	*is_global = 1;
+
+	if (!parent)
+		return NULL;
+
+	/* Not using the channel filename passed as an argument, since for each
+	 * channel relay appends the corresponding CPU number to the filename
+	 * passed in relay_open(). This should be fine as relay just needs a
+	 * dentry of the file associated with the channel buffer and that file's
+	 * name need not be the same as the filename passed as an argument.
+	 */
+	buf_file = debugfs_create_file("guc_log", mode,
+				       parent, buf, &relay_file_operations);
+	return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+	debugfs_remove(dentry);
+	return 0;
+}
+
+/* relay channel callbacks */
+static struct rchan_callbacks relay_callbacks = {
+	.subbuf_start = subbuf_start_callback,
+	.create_buf_file = create_buf_file_callback,
+	.remove_buf_file = remove_buf_file_callback,
+};
+
+static void guc_log_remove_relay_file(struct intel_guc *guc)
+{
+	relay_close(guc->log.relay_chan);
+}
+
+static int guc_log_create_relay_channel(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct rchan *guc_log_relay_chan;
+	size_t n_subbufs, subbuf_size;
+
+	/* Keep the size of sub buffers the same as the shared log buffer */
+	subbuf_size = guc->log.vma->obj->base.size;
+
+	/* Store up to 8 snapshots, which is large enough to buffer sufficient
+	 * boot time logs and provides enough leeway to the User, in terms of
+	 * latency, for consuming the logs from relay, while not taking up
+	 * too much memory.
+	 */
+	n_subbufs = 8;
+
+	guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+					n_subbufs, &relay_callbacks, dev_priv);
+	if (!guc_log_relay_chan) {
+		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+		return -ENOMEM;
+	}
+
+	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+	guc->log.relay_chan = guc_log_relay_chan;
+	return 0;
+}
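+
+/*
+ * With n_subbufs = 8 and subbuf_size equal to the shared log buffer
+ * size, relay can hold up to 8 snapshots of the log buffer before the
+ * no-overwrite policy in subbuf_start_callback() starts dropping data.
+ */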
+
+static int guc_log_create_relay_file(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct dentry *log_dir;
+	int ret;
+
+	/* For now create the log file in /sys/kernel/debug/dri/0 dir */
+	log_dir = dev_priv->drm.primary->debugfs_root;
+
+	/* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs
+	 * is not mounted and so the relay file can't be created.
+	 * The relay API fits well with debugfs only; to use relay there are
+	 * 3 requirements, which can be met in a straightforward/clean manner
+	 * only for a debugfs file :-
+	 * i)   Need the associated dentry pointer of the file, while opening the
+	 *      relay channel.
+	 * ii)  Should be able to use 'relay_file_operations' fops for the file.
+	 * iii) Set the 'i_private' field of file's inode to the pointer of
+	 *	relay channel buffer.
+	 */
+	if (!log_dir) {
+		DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
+		return -ENODEV;
+	}
+
+	ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
+	if (ret) {
+		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void guc_move_to_next_buf(struct intel_guc *guc)
+{
+	/* Make sure the updates made in the sub buffer are visible to the
+	 * Consumer when it sees the following update to the offset inside
+	 * the sub buffer.
+	 */
+	smp_wmb();
+
+	/* All data has been written, so now move the offset of sub buffer. */
+	relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+
+	/* Switch to the next sub buffer */
+	relay_flush(guc->log.relay_chan);
+}
+
+static void *guc_get_write_buffer(struct intel_guc *guc)
+{
+	if (!guc->log.relay_chan)
+		return NULL;
+
+	/* Just get the base address of a new sub buffer and copy data into it
+	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
+	 * buffers are full. Could have used relay_write() to indirectly
+	 * copy the data, but that would have been a bit convoluted, as we
+	 * need to write to only certain locations inside a sub buffer, which
+	 * cannot be done without using relay_reserve() along with
+	 * relay_write(). So it's better to use relay_reserve() alone.
+	 */
+	return relay_reserve(guc->log.relay_chan, 0);
+}
+
+static bool
+guc_check_log_buf_overflow(struct intel_guc *guc,
+			   enum guc_log_buffer_type type, unsigned int full_cnt)
+{
+	unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+	bool overflow = false;
+
+	if (full_cnt != prev_full_cnt) {
+		overflow = true;
+
+		guc->log.prev_overflow_count[type] = full_cnt;
+		guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+
+		if (full_cnt < prev_full_cnt) {
+			/* buffer_full_cnt is a 4 bit counter */
+			guc->log.total_overflow_count[type] += 16;
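+			/* e.g. prev_full_cnt = 15, full_cnt = 2: the unsigned
+			 * (2 - 15) above plus the 16 here nets 3 overflows.
+			 */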
+		}
+		DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+	}
+
+	return overflow;
+}
+
+static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+{
+	switch (type) {
+	case GUC_ISR_LOG_BUFFER:
+		return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+	case GUC_DPC_LOG_BUFFER:
+		return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+	case GUC_CRASH_DUMP_LOG_BUFFER:
+		return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+	default:
+		MISSING_CASE(type);
+	}
+
+	return 0;
+}
+
+static void guc_read_update_log_buffer(struct intel_guc *guc)
+{
+	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+	struct guc_log_buffer_state log_buf_state_local;
+	enum guc_log_buffer_type type;
+	void *src_data, *dst_data;
+	bool new_overflow;
+
+	if (WARN_ON(!guc->log.buf_addr))
+		return;
+
+	/* Get the pointer to shared GuC log buffer */
+	log_buf_state = src_data = guc->log.buf_addr;
+
+	/* Get the pointer to local buffer to store the logs */
+	log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+
+	/* Actual logs are present from the 2nd page */
+	src_data += PAGE_SIZE;
+	dst_data += PAGE_SIZE;
+
+	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+		/* Make a copy of the state structure, inside the GuC log buffer
+		 * (which is mapped uncached), on the stack to avoid reading
+		 * from it multiple times.
+		 */
+		memcpy(&log_buf_state_local, log_buf_state,
+		       sizeof(struct guc_log_buffer_state));
+		buffer_size = guc_get_log_buffer_size(type);
+		read_offset = log_buf_state_local.read_ptr;
+		write_offset = log_buf_state_local.sampled_write_ptr;
+		full_cnt = log_buf_state_local.buffer_full_cnt;
+
+		/* Bookkeeping stuff */
+		guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
+		new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+
+		/* Update the state of shared log buffer */
+		log_buf_state->read_ptr = write_offset;
+		log_buf_state->flush_to_file = 0;
+		log_buf_state++;
+
+		if (unlikely(!log_buf_snapshot_state))
+			continue;
+
+		/* First copy the state structure into the snapshot buffer */
+		memcpy(log_buf_snapshot_state, &log_buf_state_local,
+		       sizeof(struct guc_log_buffer_state));
+
+		/* The write pointer could have been updated by the GuC firmware
+		 * after sending the flush interrupt to the Host; for
+		 * consistency, set the write pointer in the snapshot buffer to
+		 * the sampled_write_ptr value.
+		 */
+		log_buf_snapshot_state->write_ptr = write_offset;
+		log_buf_snapshot_state++;
+
+		/* Now copy the actual logs. */
+		if (unlikely(new_overflow)) {
+			/* copy the whole buffer in case of overflow */
+			read_offset = 0;
+			write_offset = buffer_size;
+		} else if (unlikely((read_offset > buffer_size) ||
+				    (write_offset > buffer_size))) {
+			DRM_ERROR("invalid log buffer state\n");
+			/* copy whole buffer as offsets are unreliable */
+			read_offset = 0;
+			write_offset = buffer_size;
+		}
+
+		/* Just copy the newly written data */
+		if (read_offset > write_offset) {
+			i915_memcpy_from_wc(dst_data, src_data, write_offset);
+			bytes_to_copy = buffer_size - read_offset;
+		} else {
+			bytes_to_copy = write_offset - read_offset;
+		}
+		i915_memcpy_from_wc(dst_data + read_offset,
+				    src_data + read_offset, bytes_to_copy);
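+		/* Worked example (illustrative numbers): with buffer_size =
+		 * 16KiB, read_offset = 12KiB and write_offset = 4KiB (the
+		 * wrapped case), the first copy above covers [0, 4KiB) and
+		 * this one covers [12KiB, 16KiB); in the non-wrapped case
+		 * only [read_offset, write_offset) is copied.
+		 */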
+
+		src_data += buffer_size;
+		dst_data += buffer_size;
+	}
+
+	if (log_buf_snapshot_state)
+		guc_move_to_next_buf(guc);
+	else {
+		/* Use a ratelimited message to avoid a deluge; logs might be
+		 * getting consumed by the User at a slow rate.
+		 */
+		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+		guc->log.capture_miss_count++;
+	}
+}
+
+static void guc_capture_logs_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, guc.log.flush_work);
+
+	i915_guc_capture_logs(dev_priv);
+}
+
+static void guc_log_cleanup(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	/* First disable the flush interrupt */
+	gen9_disable_guc_interrupts(dev_priv);
+
+	if (guc->log.flush_wq)
+		destroy_workqueue(guc->log.flush_wq);
+
+	guc->log.flush_wq = NULL;
+
+	if (guc->log.relay_chan)
+		guc_log_remove_relay_file(guc);
+
+	guc->log.relay_chan = NULL;
+
+	if (guc->log.buf_addr)
+		i915_gem_object_unpin_map(guc->log.vma->obj);
+
+	guc->log.buf_addr = NULL;
+}
+
+static int guc_log_create_extras(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	void *vaddr;
+	int ret;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	/* Nothing to do */
+	if (i915.guc_log_level < 0)
+		return 0;
+
+	if (!guc->log.buf_addr) {
+		/* Create a WC (Uncached for read) vmalloc mapping of log
+		 * buffer pages, so that we can directly get the data
+		 * (up-to-date) from memory.
+		 */
+		vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+		if (IS_ERR(vaddr)) {
+			ret = PTR_ERR(vaddr);
+			DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+			return ret;
+		}
+
+		guc->log.buf_addr = vaddr;
+	}
+
+	if (!guc->log.relay_chan) {
+		/* Create a relay channel, so that we have buffers for storing
+		 * the GuC firmware logs; the channel will be linked with a file
+		 * later on, when debugfs is registered.
+		 */
+		ret = guc_log_create_relay_channel(guc);
+		if (ret)
+			return ret;
+	}
+
+	if (!guc->log.flush_wq) {
+		INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
+
+		/*
+		 * The GuC log buffer flush work item has to do register access
+		 * to send the ack to GuC, and this work item, if not synced
+		 * before suspend, can potentially get executed after the GFX
+		 * device is suspended.
+		 * By marking the WQ as freezable, we don't have to bother with
+		 * flushing this work item from the suspend hooks; a pending
+		 * work item, if any, will either be executed before the
+		 * suspend or scheduled later on resume. This way the handling
+		 * of the work item can be kept the same between system suspend
+		 * & rpm suspend.
+		 */
+		guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+							    WQ_HIGHPRI | WQ_FREEZABLE);
+		if (guc->log.flush_wq == NULL) {
+			DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
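+
+/*
+ * Each step in guc_log_create_extras() is guarded by a NULL check, so
+ * the function is idempotent: guc_log_late_setup() can safely call it
+ * again to finish whatever wasn't set up at boot time.
+ */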
+
 static void guc_log_create(struct intel_guc *guc)
 {
 	struct i915_vma *vma;
 	unsigned long offset;
 	uint32_t size, flags;
 
-	if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
-		return;
-
 	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
 		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
 
@@ -865,8 +1300,18 @@ static void guc_log_create(struct intel_guc *guc)
 		GUC_LOG_ISR_PAGES + 1 +
 		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
 
-	vma = guc->log_vma;
+	vma = guc->log.vma;
 	if (!vma) {
+		/* We require SSE 4.1 for fast reads from the GuC log buffer and
+		 * it should be present on the chipsets supporting GuC-based
+		 * submissions.
+		 */
+		if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
+			/* logging will not be enabled */
+			i915.guc_log_level = -1;
+			return;
+		}
+
 		vma = guc_allocate_vma(guc, size);
 		if (IS_ERR(vma)) {
 			/* logging will be off */
@@ -874,7 +1319,14 @@ static void guc_log_create(struct intel_guc *guc)
 			return;
 		}
 
-		guc->log_vma = vma;
+		guc->log.vma = vma;
+
+		if (guc_log_create_extras(guc)) {
+			guc_log_cleanup(guc);
+			i915_vma_unpin_and_release(&guc->log.vma);
+			i915.guc_log_level = -1;
+			return;
+		}
 	}
 
 	/* each allocated unit is a page */
@@ -884,7 +1336,37 @@ static void guc_log_create(struct intel_guc *guc)
 		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
 	offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
-	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+static int guc_log_late_setup(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	int ret;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (i915.guc_log_level < 0)
+		return -EINVAL;
+
+	/* If log_level was set to -1 at boot time, then the setup needed to
+	 * handle log buffer flush interrupts will not have been done yet,
+	 * so do that now.
+	 */
+	ret = guc_log_create_extras(guc);
+	if (ret)
+		goto err;
+
+	ret = guc_log_create_relay_file(guc);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	guc_log_cleanup(guc);
+	/* logging will remain off */
+	i915.guc_log_level = -1;
+	return ret;
 }
 
 static void guc_policies_init(struct guc_policies *policies)
@@ -917,6 +1399,7 @@ static void guc_addon_create(struct intel_guc *guc)
 	struct guc_policies *policies;
 	struct guc_mmio_reg_state *reg_state;
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	struct page *page;
 	u32 size;
 
@@ -944,10 +1427,10 @@ static void guc_addon_create(struct intel_guc *guc)
 	 * so its address won't change after we've told the GuC where
 	 * to find it.
 	 */
-	engine = &dev_priv->engine[RCS];
+	engine = dev_priv->engine[RCS];
 	ads->golden_context_lrca = engine->status_page.ggtt_offset;
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
 
 	/* GuC scheduling policies */
@@ -960,7 +1443,7 @@ static void guc_addon_create(struct intel_guc *guc)
 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		reg_state->mmio_white_list[engine->guc_id].mmio_start =
 			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
@@ -1005,6 +1488,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 
 	guc->ctx_pool_vma = vma;
 	ida_init(&guc->ctx_ids);
+	mutex_init(&guc->action_lock);
 	guc_log_create(guc);
 	guc_addon_create(guc);
 
@@ -1014,9 +1498,10 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
+	struct drm_i915_gem_request *request;
 	struct i915_guc_client *client;
 	struct intel_engine_cs *engine;
-	struct drm_i915_gem_request *request;
+	enum intel_engine_id id;
 
 	/* client for execbuf submission */
 	client = guc_client_alloc(dev_priv,
@@ -1033,11 +1518,12 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	guc_init_doorbell_hw(guc);
 
 	/* Take over from manual control of ELSP (execlists) */
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		engine->submit_request = i915_guc_submit;
 
 		/* Replay the current set of previously submitted requests */
-		list_for_each_entry(request, &engine->request_list, link) {
+		list_for_each_entry(request,
+				    &engine->timeline->requests, link) {
 			client->wq_rsvd += sizeof(struct guc_wq_item);
 			if (i915_sw_fence_done(&request->submit))
 				i915_guc_submit(request);
@@ -1066,7 +1552,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 	struct intel_guc *guc = &dev_priv->guc;
 
 	i915_vma_unpin_and_release(&guc->ads_vma);
-	i915_vma_unpin_and_release(&guc->log_vma);
+	i915_vma_unpin_and_release(&guc->log.vma);
 
 	if (guc->ctx_pool_vma)
 		ida_destroy(&guc->ctx_ids);
@@ -1087,6 +1573,8 @@ int intel_guc_suspend(struct drm_device *dev)
 	if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
 		return 0;
 
+	gen9_disable_guc_interrupts(dev_priv);
+
 	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
@@ -1113,6 +1601,9 @@ int intel_guc_resume(struct drm_device *dev)
 	if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
 		return 0;
 
+	if (i915.guc_log_level >= 0)
+		gen9_enable_guc_interrupts(dev_priv);
+
 	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
@@ -1122,3 +1613,104 @@ int intel_guc_resume(struct drm_device *dev)
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
+
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
+{
+	guc_read_update_log_buffer(&dev_priv->guc);
+
+	/* Generally the device is expected to be active at this
+	 * point, so the get/put should be really quick.
+	 */
+	intel_runtime_pm_get(dev_priv);
+	host2guc_logbuffer_flush_complete(&dev_priv->guc);
+	intel_runtime_pm_put(dev_priv);
+}
+
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
+{
+	if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+		return;
+
+	/* First disable the interrupts; they will be re-enabled afterwards */
+	gen9_disable_guc_interrupts(dev_priv);
+
+	/* Before initiating the forceful flush, wait for any pending/ongoing
+	 * flush to complete; otherwise the forceful flush may not actually
+	 * happen.
+	 */
+	flush_work(&dev_priv->guc.log.flush_work);
+
+	/* Ask GuC to update the log buffer state */
+	host2guc_force_logbuffer_flush(&dev_priv->guc);
+
+	/* GuC would have updated log buffer by now, so capture it */
+	i915_guc_capture_logs(dev_priv);
+}
+
+void i915_guc_unregister(struct drm_i915_private *dev_priv)
+{
+	if (!i915.enable_guc_submission)
+		return;
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	guc_log_cleanup(&dev_priv->guc);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+void i915_guc_register(struct drm_i915_private *dev_priv)
+{
+	if (!i915.enable_guc_submission)
+		return;
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	guc_log_late_setup(&dev_priv->guc);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+{
+	union guc_log_control log_param;
+	int ret;
+
+	log_param.value = control_val;
+
+	if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
+	    log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+		return -EINVAL;
+
+	/* This combination doesn't make sense & won't have any effect */
+	if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+		return 0;
+
+	ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
+	if (ret < 0) {
+		DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
+		return ret;
+	}
+
+	i915.guc_log_level = log_param.verbosity;
+
+	/* If log_level was set to -1 at boot time, then the relay channel file
+	 * wouldn't have been created by now and interrupts also would not have
+	 * been enabled.
+	 */
+	if (!dev_priv->guc.log.relay_chan) {
+		ret = guc_log_late_setup(&dev_priv->guc);
+		if (!ret)
+			gen9_enable_guc_interrupts(dev_priv);
+	} else if (!log_param.logging_enabled) {
+		/* Once logging is disabled, GuC won't generate logs or send a
+		 * flush interrupt. But there could still be some data in the
+		 * log buffer which is yet to be captured. So request GuC to
+		 * update the log buffer state and then collect the leftover
+		 * logs.
+		 */
+		i915_guc_flush_logs(dev_priv);
+
+		/* As logging is disabled, update log level to reflect that */
+		i915.guc_log_level = -1;
+	} else {
+		/* In case interrupts were disabled, enable them now */
+		gen9_enable_guc_interrupts(dev_priv);
+	}
+
+	return ret;
+}
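+
+/*
+ * Illustrative (hypothetical) use of i915_guc_log_control(): a debugfs
+ * write handler would pack the requested state into a
+ * union guc_log_control before handing over its raw value, e.g.
+ *
+ *	union guc_log_control param;
+ *
+ *	param.logging_enabled = 1;
+ *	param.verbosity = 2;
+ *	ret = i915_guc_log_control(dev_priv, param.value);
+ */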
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fc286cd..6d7505b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -170,6 +170,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
 } while (0)
 
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
 
 /* For display hotplug interrupt */
 static inline void
@@ -303,18 +304,18 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	new_val = dev_priv->pm_irq_mask;
+	new_val = dev_priv->pm_imr;
 	new_val &= ~interrupt_mask;
 	new_val |= (~enabled_irq_mask & interrupt_mask);
 
-	if (new_val != dev_priv->pm_irq_mask) {
-		dev_priv->pm_irq_mask = new_val;
-		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+	if (new_val != dev_priv->pm_imr) {
+		dev_priv->pm_imr = new_val;
+		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
 		POSTING_READ(gen6_pm_imr(dev_priv));
 	}
 }
 
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
@@ -322,28 +323,54 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 	snb_update_pm_irq(dev_priv, mask, mask);
 }
 
-static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
-				  uint32_t mask)
+static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	snb_update_pm_irq(dev_priv, mask, 0);
 }
 
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
 
-	__gen6_disable_pm_irq(dev_priv, mask);
+	__gen6_mask_pm_irq(dev_priv, mask);
+}
+
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
+{
+	i915_reg_t reg = gen6_pm_iir(dev_priv);
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	I915_WRITE(reg, reset_mask);
+	I915_WRITE(reg, reset_mask);
+	POSTING_READ(reg);
+}
+
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+{
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	dev_priv->pm_ier |= enable_mask;
+	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+	gen6_unmask_pm_irq(dev_priv, enable_mask);
+	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
+}
+
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+{
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	dev_priv->pm_ier &= ~disable_mask;
+	__gen6_mask_pm_irq(dev_priv, disable_mask);
+	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+	/* a barrier is missing here, but we don't really need one */
 }
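+
+/*
+ * Note the ordering in the two helpers above: enabling writes IER
+ * before unmasking IMR, while disabling masks IMR before clearing IER,
+ * so an event cannot be delivered while its IER bit is in flux.
+ */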
 
 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	i915_reg_t reg = gen6_pm_iir(dev_priv);
-
 	spin_lock_irq(&dev_priv->irq_lock);
-	I915_WRITE(reg, dev_priv->pm_rps_events);
-	I915_WRITE(reg, dev_priv->pm_rps_events);
-	POSTING_READ(reg);
+	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
 	dev_priv->rps.pm_iir = 0;
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -357,8 +384,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 	WARN_ON_ONCE(dev_priv->rps.pm_iir);
 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
 	dev_priv->rps.interrupts_enabled = true;
-	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
-				dev_priv->pm_rps_events);
 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
@@ -379,9 +404,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
-	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
-				~dev_priv->pm_rps_events);
+	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
 	synchronize_irq(dev_priv->drm.irq);
@@ -395,6 +418,38 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	gen6_reset_rps_interrupts(dev_priv);
 }
 
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (!dev_priv->guc.interrupts_enabled) {
+		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
+				       dev_priv->pm_guc_events);
+		dev_priv->guc.interrupts_enabled = true;
+		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+	}
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->guc.interrupts_enabled = false;
+
+	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+	synchronize_irq(dev_priv->drm.irq);
+
+	gen9_reset_guc_interrupts(dev_priv);
+}
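+
+/*
+ * The teardown above mirrors gen6_disable_rps_interrupts(): mark the
+ * interrupts disabled, mask them, wait for any handler in flight with
+ * synchronize_irq(), and finally clear the stale IIR bits.
+ */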
+
 /**
  * bdw_update_port_irq - update DE port interrupt
  * @dev_priv: driver private
@@ -670,8 +725,8 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t high_frame, low_frame;
 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
-	struct intel_crtc *intel_crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+								pipe);
 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 
 	htotal = mode->crtc_htotal;
@@ -776,8 +831,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 				    const struct drm_display_mode *mode)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+								pipe);
 	int position;
 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
 	bool in_vbl = true;
@@ -912,21 +967,22 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
 			      struct timeval *vblank_time,
 			      unsigned flags)
 {
-	struct drm_crtc *crtc;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc;
 
-	if (pipe >= INTEL_INFO(dev)->num_pipes) {
+	if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
 		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return -EINVAL;
 	}
 
 	/* Get drm_crtc to timestamp: */
-	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	if (crtc == NULL) {
 		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return -EINVAL;
 	}
 
-	if (!crtc->hwmode.crtc_clock) {
+	if (!crtc->base.hwmode.crtc_clock) {
 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
 		return -EBUSY;
 	}
@@ -934,7 +990,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
 	/* Helper routine in DRM core does all the work: */
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
 						     vblank_time, flags,
-						     &crtc->hwmode);
+						     &crtc->base.hwmode);
 }
 
 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
@@ -1058,8 +1114,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 static bool any_waiters(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		if (intel_engine_has_waiter(engine))
 			return true;
 
@@ -1084,7 +1141,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	pm_iir = dev_priv->rps.pm_iir;
 	dev_priv->rps.pm_iir = 0;
 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
-	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	client_boost = dev_priv->rps.client_boost;
 	dev_priv->rps.client_boost = false;
 	spin_unlock_irq(&dev_priv->irq_lock);
@@ -1257,20 +1314,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
 {
 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		notify_ring(&dev_priv->engine[RCS]);
+		notify_ring(dev_priv->engine[RCS]);
 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		notify_ring(&dev_priv->engine[VCS]);
+		notify_ring(dev_priv->engine[VCS]);
 }
 
 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
 {
 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		notify_ring(&dev_priv->engine[RCS]);
+		notify_ring(dev_priv->engine[RCS]);
 	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		notify_ring(&dev_priv->engine[VCS]);
+		notify_ring(dev_priv->engine[VCS]);
 	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(&dev_priv->engine[BCS]);
+		notify_ring(dev_priv->engine[BCS]);
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
@@ -1323,11 +1380,13 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
 
-	if (master_ctl & GEN8_GT_PM_IRQ) {
+	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
 		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
-		if (gt_iir[2] & dev_priv->pm_rps_events) {
+		if (gt_iir[2] & (dev_priv->pm_rps_events |
+				 dev_priv->pm_guc_events)) {
 			I915_WRITE_FW(GEN8_GT_IIR(2),
-				      gt_iir[2] & dev_priv->pm_rps_events);
+				      gt_iir[2] & (dev_priv->pm_rps_events |
+						   dev_priv->pm_guc_events));
 			ret = IRQ_HANDLED;
 		} else
 			DRM_ERROR("The master control interrupt lied (PM)!\n");
@@ -1340,25 +1399,28 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
 				u32 gt_iir[4])
 {
 	if (gt_iir[0]) {
-		gen8_cs_irq_handler(&dev_priv->engine[RCS],
+		gen8_cs_irq_handler(dev_priv->engine[RCS],
 				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
-		gen8_cs_irq_handler(&dev_priv->engine[BCS],
+		gen8_cs_irq_handler(dev_priv->engine[BCS],
 				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
 	}
 
 	if (gt_iir[1]) {
-		gen8_cs_irq_handler(&dev_priv->engine[VCS],
+		gen8_cs_irq_handler(dev_priv->engine[VCS],
 				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
-		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+		gen8_cs_irq_handler(dev_priv->engine[VCS2],
 				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
 	}
 
 	if (gt_iir[3])
-		gen8_cs_irq_handler(&dev_priv->engine[VECS],
+		gen8_cs_irq_handler(dev_priv->engine[VECS],
 				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
 
 	if (gt_iir[2] & dev_priv->pm_rps_events)
 		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+
+	if (gt_iir[2] & dev_priv->pm_guc_events)
+		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
 }
 
 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -1585,7 +1647,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
 	if (pm_iir & dev_priv->pm_rps_events) {
 		spin_lock(&dev_priv->irq_lock);
-		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
 		if (dev_priv->rps.interrupts_enabled) {
 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
 			schedule_work(&dev_priv->rps.work);
@@ -1598,13 +1660,48 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 	if (HAS_VEBOX(dev_priv)) {
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-			notify_ring(&dev_priv->engine[VECS]);
+			notify_ring(dev_priv->engine[VECS]);
 
 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
 	}
 }
 
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
+{
+	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
+		/* Sample the log buffer flush related bits & clear them out
+		 * right away from the message identity register, to minimize
+		 * the probability of losing a flush interrupt when there are
+		 * back-to-back flush interrupts.
+		 * There can be a new flush interrupt, for a different log
+		 * buffer type (like ISR), whilst the Host is handling one
+		 * (for DPC). Since the same bit is used in the message
+		 * register for ISR & DPC, it could happen that GuC sets the
+		 * bit for the 2nd interrupt but the Host clears out the bit
+		 * on handling the 1st interrupt.
+		 */
+		u32 msg, flush;
+
+		msg = I915_READ(SOFT_SCRATCH(15));
+		flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
+			       GUC2HOST_MSG_FLUSH_LOG_BUFFER);
+		if (flush) {
+			/* Clear the message bits that are handled */
+			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
+
+			/* Handle flush interrupt in bottom half */
+			queue_work(dev_priv->guc.log.flush_wq,
+				   &dev_priv->guc.log.flush_work);
+
+			dev_priv->guc.log.flush_interrupt_count++;
+		} else {
+			/* Leaving unhandled event bits set won't cause the
+			 * interrupt to re-trigger.
+			 */
+		}
+	}
+}
+
 static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
 				     enum pipe pipe)
 {
@@ -2407,7 +2504,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 
 		if (fault_errors)
-			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
 				  pipe_name(pipe),
 				  fault_errors);
 	}
@@ -2551,92 +2648,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 	wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
-static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
+static inline void
+i915_err_print_instdone(struct drm_i915_private *dev_priv,
+			struct intel_instdone *instdone)
 {
-	uint32_t instdone[I915_NUM_INSTDONE_REG];
-	u32 eir = I915_READ(EIR);
-	int pipe, i;
+	int slice;
+	int subslice;
 
-	if (!eir)
+	pr_err("  INSTDONE: 0x%08x\n", instdone->instdone);
+
+	if (INTEL_GEN(dev_priv) <= 3)
 		return;
 
-	pr_err("render error detected, EIR: 0x%08x\n", eir);
+	pr_err("  SC_INSTDONE: 0x%08x\n", instdone->slice_common);
 
-	i915_get_extra_instdone(dev_priv, instdone);
+	if (INTEL_GEN(dev_priv) <= 6)
+		return;
 
-	if (IS_G4X(dev_priv)) {
-		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
-			u32 ipeir = I915_READ(IPEIR_I965);
+	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+		pr_err("  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+		       slice, subslice, instdone->sampler[slice][subslice]);
 
-			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
-			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
-			for (i = 0; i < ARRAY_SIZE(instdone); i++)
-				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
-			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
-			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
-			I915_WRITE(IPEIR_I965, ipeir);
-			POSTING_READ(IPEIR_I965);
-		}
-		if (eir & GM45_ERROR_PAGE_TABLE) {
-			u32 pgtbl_err = I915_READ(PGTBL_ER);
-			pr_err("page table error\n");
-			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
-			I915_WRITE(PGTBL_ER, pgtbl_err);
-			POSTING_READ(PGTBL_ER);
-		}
-	}
+	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+		pr_err("  ROW_INSTDONE[%d][%d]: 0x%08x\n",
+		       slice, subslice, instdone->row[slice][subslice]);
+}
 
-	if (!IS_GEN2(dev_priv)) {
-		if (eir & I915_ERROR_PAGE_TABLE) {
-			u32 pgtbl_err = I915_READ(PGTBL_ER);
-			pr_err("page table error\n");
-			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
-			I915_WRITE(PGTBL_ER, pgtbl_err);
-			POSTING_READ(PGTBL_ER);
-		}
-	}
+static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+	u32 eir;
 
-	if (eir & I915_ERROR_MEMORY_REFRESH) {
-		pr_err("memory refresh error:\n");
-		for_each_pipe(dev_priv, pipe)
-			pr_err("pipe %c stat: 0x%08x\n",
-			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
-		/* pipestat has already been acked */
-	}
-	if (eir & I915_ERROR_INSTRUCTION) {
-		pr_err("instruction error\n");
-		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
-		for (i = 0; i < ARRAY_SIZE(instdone); i++)
-			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
-		if (INTEL_GEN(dev_priv) < 4) {
-			u32 ipeir = I915_READ(IPEIR);
+	if (!IS_GEN2(dev_priv))
+		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
 
-			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
-			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
-			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
-			I915_WRITE(IPEIR, ipeir);
-			POSTING_READ(IPEIR);
-		} else {
-			u32 ipeir = I915_READ(IPEIR_I965);
+	if (INTEL_GEN(dev_priv) < 4)
+		I915_WRITE(IPEIR, I915_READ(IPEIR));
+	else
+		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
 
-			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
-			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
-			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
-			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
-			I915_WRITE(IPEIR_I965, ipeir);
-			POSTING_READ(IPEIR_I965);
-		}
-	}
-
-	I915_WRITE(EIR, eir);
-	POSTING_READ(EIR);
+	I915_WRITE(EIR, I915_READ(EIR));
 	eir = I915_READ(EIR);
 	if (eir) {
 		/*
 		 * some errors might have become stuck,
 		 * mask them.
 		 */
-		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
 		I915_WRITE(EMR, I915_READ(EMR) | eir);
 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 	}
@@ -2665,7 +2722,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 	va_end(args);
 
 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
-	i915_report_and_clear_eir(dev_priv);
+	i915_clear_error_registers(dev_priv);
 
 	if (!engine_mask)
 		return;
@@ -2694,18 +2751,26 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	if (INTEL_INFO(dev)->gen >= 4)
-		i915_enable_pipestat(dev_priv, pipe,
-				     PIPE_START_VBLANK_INTERRUPT_STATUS);
-	else
-		i915_enable_pipestat(dev_priv, pipe,
-				     PIPE_VBLANK_INTERRUPT_STATUS);
+	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return 0;
+}
+
+static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, pipe,
+			     PIPE_START_VBLANK_INTERRUPT_STATUS);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	return 0;
@@ -2715,8 +2780,8 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned long irqflags;
-	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK(pipe);
+	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ilk_enable_display_irq(dev_priv, bit);
@@ -2725,19 +2790,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	return 0;
 }
 
-static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, pipe,
-			     PIPE_START_VBLANK_INTERRUPT_STATUS);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
-	return 0;
-}
-
 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2753,14 +2805,23 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	i915_disable_pipestat(dev_priv, pipe,
-			      PIPE_VBLANK_INTERRUPT_STATUS |
 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -2769,25 +2830,14 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned long irqflags;
-	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK(pipe);
+	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ilk_disable_display_irq(dev_priv, bit);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, pipe,
-			      PIPE_START_VBLANK_INTERRUPT_STATUS);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2798,411 +2848,16 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
-	if (INTEL_GEN(engine->i915) >= 8) {
-		return (ipehr >> 23) == 0x1c;
-	} else {
-		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
-		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
-				 MI_SEMAPHORE_REGISTER);
-	}
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
-				 u64 offset)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_engine_cs *signaller;
-
-	if (INTEL_GEN(dev_priv) >= 8) {
-		for_each_engine(signaller, dev_priv) {
-			if (engine == signaller)
-				continue;
-
-			if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
-				return signaller;
-		}
-	} else {
-		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-
-		for_each_engine(signaller, dev_priv) {
-			if(engine == signaller)
-				continue;
-
-			if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
-				return signaller;
-		}
-	}
-
-	DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
-			 engine->name, ipehr, offset);
-
-	return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	void __iomem *vaddr;
-	u32 cmd, ipehr, head;
-	u64 offset = 0;
-	int i, backwards;
-
-	/*
-	 * This function does not support execlist mode - any attempt to
-	 * proceed further into this function will result in a kernel panic
-	 * when dereferencing ring->buffer, which is not set up in execlist
-	 * mode.
-	 *
-	 * The correct way of doing it would be to derive the currently
-	 * executing ring buffer from the current context, which is derived
-	 * from the currently running request. Unfortunately, to get the
-	 * current request we would have to grab the struct_mutex before doing
-	 * anything else, which would be ill-advised since some other thread
-	 * might have grabbed it already and managed to hang itself, causing
-	 * the hang checker to deadlock.
-	 *
-	 * Therefore, this function does not support execlist mode in its
-	 * current form. Just return NULL and move on.
-	 */
-	if (engine->buffer == NULL)
-		return NULL;
-
-	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-	if (!ipehr_is_semaphore_wait(engine, ipehr))
-		return NULL;
-
-	/*
-	 * HEAD is likely pointing to the dword after the actual command,
-	 * so scan backwards until we find the MBOX. But limit it to just 3
-	 * or 4 dwords depending on the semaphore wait command size.
-	 * Note that we don't care about ACTHD here since that might
-	 * point at at batch, and semaphores are always emitted into the
-	 * ringbuffer itself.
-	 */
-	head = I915_READ_HEAD(engine) & HEAD_ADDR;
-	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
-	vaddr = (void __iomem *)engine->buffer->vaddr;
-
-	for (i = backwards; i; --i) {
-		/*
-		 * Be paranoid and presume the hw has gone off into the wild -
-		 * our ring is smaller than what the hardware (and hence
-		 * HEAD_ADDR) allows. Also handles wrap-around.
-		 */
-		head &= engine->buffer->size - 1;
-
-		/* This here seems to blow up */
-		cmd = ioread32(vaddr + head);
-		if (cmd == ipehr)
-			break;
-
-		head -= 4;
-	}
-
-	if (!i)
-		return NULL;
-
-	*seqno = ioread32(vaddr + head + 4) + 1;
-	if (INTEL_GEN(dev_priv) >= 8) {
-		offset = ioread32(vaddr + head + 12);
-		offset <<= 32;
-		offset |= ioread32(vaddr + head + 8);
-	}
-	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_engine_cs *signaller;
-	u32 seqno;
-
-	engine->hangcheck.deadlock++;
-
-	signaller = semaphore_waits_for(engine, &seqno);
-	if (signaller == NULL)
-		return -1;
-
-	if (IS_ERR(signaller))
-		return 0;
-
-	/* Prevent pathological recursion due to driver bugs */
-	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
-		return -1;
-
-	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
-		return 1;
-
-	/* cursory check for an unkickable deadlock */
-	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
-	    semaphore_passed(signaller) < 0)
-		return -1;
-
-	return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-
-	for_each_engine(engine, dev_priv)
-		engine->hangcheck.deadlock = 0;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
-	u32 instdone[I915_NUM_INSTDONE_REG];
-	bool stuck;
-	int i;
-
-	if (engine->id != RCS)
-		return true;
-
-	i915_get_extra_instdone(engine->i915, instdone);
-
-	/* There might be unstable subunit states even when
-	 * actual head is not moving. Filter out the unstable ones by
-	 * accumulating the undone -> done transitions and only
-	 * consider those as progress.
-	 */
-	stuck = true;
-	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
-		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
-
-		if (tmp != engine->hangcheck.instdone[i])
-			stuck = false;
-
-		engine->hangcheck.instdone[i] |= tmp;
-	}
-
-	return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
-	if (acthd != engine->hangcheck.acthd) {
-
-		/* Clear subunit states on head movement */
-		memset(engine->hangcheck.instdone, 0,
-		       sizeof(engine->hangcheck.instdone));
-
-		return HANGCHECK_ACTIVE;
-	}
-
-	if (!subunits_stuck(engine))
-		return HANGCHECK_ACTIVE;
-
-	return HANGCHECK_HUNG;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	enum intel_engine_hangcheck_action ha;
-	u32 tmp;
-
-	ha = head_stuck(engine, acthd);
-	if (ha != HANGCHECK_HUNG)
-		return ha;
-
-	if (IS_GEN2(dev_priv))
-		return HANGCHECK_HUNG;
-
-	/* Is the chip hanging on a WAIT_FOR_EVENT?
-	 * If so we can simply poke the RB_WAIT bit
-	 * and break the hang. This should work on
-	 * all but the second generation chipsets.
-	 */
-	tmp = I915_READ_CTL(engine);
-	if (tmp & RING_WAIT) {
-		i915_handle_error(dev_priv, 0,
-				  "Kicking stuck wait on %s",
-				  engine->name);
-		I915_WRITE_CTL(engine, tmp);
-		return HANGCHECK_KICK;
-	}
-
-	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
-		switch (semaphore_passed(engine)) {
-		default:
-			return HANGCHECK_HUNG;
-		case 1:
-			i915_handle_error(dev_priv, 0,
-					  "Kicking stuck semaphore on %s",
-					  engine->name);
-			I915_WRITE_CTL(engine, tmp);
-			return HANGCHECK_KICK;
-		case 0:
-			return HANGCHECK_WAIT;
-		}
-	}
-
-	return HANGCHECK_HUNG;
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void i915_hangcheck_elapsed(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv),
-			     gpu_error.hangcheck_work.work);
-	struct intel_engine_cs *engine;
-	unsigned int hung = 0, stuck = 0;
-	int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
-
-	if (!i915.enable_hangcheck)
-		return;
-
-	if (!READ_ONCE(dev_priv->gt.awake))
-		return;
-
-	/* As enabling the GPU requires fairly extensive mmio access,
-	 * periodically arm the mmio checker to see if we are triggering
-	 * any invalid access.
-	 */
-	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
-
-	for_each_engine(engine, dev_priv) {
-		bool busy = intel_engine_has_waiter(engine);
-		u64 acthd;
-		u32 seqno;
-		u32 submit;
-
-		semaphore_clear_deadlocks(dev_priv);
-
-		/* We don't strictly need an irq-barrier here, as we are not
-		 * serving an interrupt request, be paranoid in case the
-		 * barrier has side-effects (such as preventing a broken
-		 * cacheline snoop) and so be sure that we can see the seqno
-		 * advance. If the seqno should stick, due to a stale
-		 * cacheline, we would erroneously declare the GPU hung.
-		 */
-		if (engine->irq_seqno_barrier)
-			engine->irq_seqno_barrier(engine);
-
-		acthd = intel_engine_get_active_head(engine);
-		seqno = intel_engine_get_seqno(engine);
-		submit = READ_ONCE(engine->last_submitted_seqno);
-
-		if (engine->hangcheck.seqno == seqno) {
-			if (i915_seqno_passed(seqno, submit)) {
-				engine->hangcheck.action = HANGCHECK_IDLE;
-			} else {
-				/* We always increment the hangcheck score
-				 * if the engine is busy and still processing
-				 * the same request, so that no single request
-				 * can run indefinitely (such as a chain of
-				 * batches). The only time we do not increment
-				 * the hangcheck score on this ring, if this
-				 * engine is in a legitimate wait for another
-				 * engine. In that case the waiting engine is a
-				 * victim and we want to be sure we catch the
-				 * right culprit. Then every time we do kick
-				 * the ring, add a small increment to the
-				 * score so that we can catch a batch that is
-				 * being repeatedly kicked and so responsible
-				 * for stalling the machine.
-				 */
-				engine->hangcheck.action =
-					engine_stuck(engine, acthd);
-
-				switch (engine->hangcheck.action) {
-				case HANGCHECK_IDLE:
-				case HANGCHECK_WAIT:
-					break;
-				case HANGCHECK_ACTIVE:
-					engine->hangcheck.score += BUSY;
-					break;
-				case HANGCHECK_KICK:
-					engine->hangcheck.score += KICK;
-					break;
-				case HANGCHECK_HUNG:
-					engine->hangcheck.score += HUNG;
-					break;
-				}
-			}
-
-			if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
-				hung |= intel_engine_flag(engine);
-				if (engine->hangcheck.action != HANGCHECK_HUNG)
-					stuck |= intel_engine_flag(engine);
-			}
-		} else {
-			engine->hangcheck.action = HANGCHECK_ACTIVE;
-
-			/* Gradually reduce the count so that we catch DoS
-			 * attempts across multiple batches.
-			 */
-			if (engine->hangcheck.score > 0)
-				engine->hangcheck.score -= ACTIVE_DECAY;
-			if (engine->hangcheck.score < 0)
-				engine->hangcheck.score = 0;
-
-			/* Clear head and subunit states on seqno movement */
-			acthd = 0;
-
-			memset(engine->hangcheck.instdone, 0,
-			       sizeof(engine->hangcheck.instdone));
-		}
-
-		engine->hangcheck.seqno = seqno;
-		engine->hangcheck.acthd = acthd;
-		busy_count += busy;
-	}
-
-	if (hung) {
-		char msg[80];
-		unsigned int tmp;
-		int len;
-
-		/* If some rings hung but others were still busy, only
-		 * blame the hanging rings in the synopsis.
-		 */
-		if (stuck != hung)
-			hung &= ~stuck;
-		len = scnprintf(msg, sizeof(msg),
-				"%s on ", stuck == hung ? "No progress" : "Hang");
-		for_each_engine_masked(engine, dev_priv, hung, tmp)
-			len += scnprintf(msg + len, sizeof(msg) - len,
-					 "%s, ", engine->name);
-		msg[len-2] = '\0';
-
-		return i915_handle_error(dev_priv, hung, msg);
-	}
-
-	/* Reset timer in case GPU hangs without another request being added */
-	if (busy_count)
-		i915_queue_hangcheck(dev_priv);
-}
-
 static void ibx_irq_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (HAS_PCH_NOP(dev))
+	if (HAS_PCH_NOP(dev_priv))
 		return;
 
 	GEN5_IRQ_RESET(SDE);
 
-	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
 		I915_WRITE(SERR_INT, 0xffffffff);
 }
 
@@ -3218,7 +2873,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (HAS_PCH_NOP(dev))
+	if (HAS_PCH_NOP(dev_priv))
 		return;
 
 	WARN_ON(I915_READ(SDEIER) != 0);
@@ -3293,7 +2948,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
 	I915_WRITE(HWSTAM, 0xffffffff);
 
 	GEN5_IRQ_RESET(DE);
-	if (IS_GEN7(dev))
+	if (IS_GEN7(dev_priv))
 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
 	gen5_gt_irq_reset(dev);
@@ -3343,7 +2998,7 @@ static void gen8_irq_reset(struct drm_device *dev)
 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
 	GEN5_IRQ_RESET(GEN8_PCU_);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		ibx_irq_reset(dev);
 }
 
@@ -3532,10 +3187,10 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 mask;
 
-	if (HAS_PCH_NOP(dev))
+	if (HAS_PCH_NOP(dev_priv))
 		return;
 
-	if (HAS_PCH_IBX(dev))
+	if (HAS_PCH_IBX(dev_priv))
 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
 	else
 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
@@ -3552,14 +3207,14 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 	pm_irqs = gt_irqs = 0;
 
 	dev_priv->gt_irq_mask = ~0;
-	if (HAS_L3_DPF(dev)) {
+	if (HAS_L3_DPF(dev_priv)) {
 		/* L3 parity interrupt is always unmasked. */
-		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
-		gt_irqs |= GT_PARITY_ERROR(dev);
+		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
+		gt_irqs |= GT_PARITY_ERROR(dev_priv);
 	}
 
 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
-	if (IS_GEN5(dev)) {
+	if (IS_GEN5(dev_priv)) {
 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
 	} else {
 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -3572,11 +3227,13 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 		 * RPS interrupts will get enabled/disabled on demand when RPS
 		 * itself is enabled/disabled.
 		 */
-		if (HAS_VEBOX(dev))
+		if (HAS_VEBOX(dev_priv)) {
 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+		}
 
-		dev_priv->pm_irq_mask = 0xffffffff;
-		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+		dev_priv->pm_imr = 0xffffffff;
+		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
 	}
 }
 
@@ -3616,7 +3273,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 
 	ibx_irq_postinstall(dev);
 
-	if (IS_IRONLAKE_M(dev)) {
+	if (IS_IRONLAKE_M(dev_priv)) {
 		/* Enable PCU event interrupts
 		 *
 		 * spinlocking not required here for correctness since interrupt
@@ -3696,14 +3353,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 	if (HAS_L3_DPF(dev_priv))
 		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
-	dev_priv->pm_irq_mask = 0xffffffff;
+	dev_priv->pm_ier = 0x0;
+	dev_priv->pm_imr = ~dev_priv->pm_ier;
 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
 	/*
 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
-	 * is enabled/disabled.
+	 * is enabled/disabled. Same will be the case for GuC interrupts.
 	 */
-	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
+	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
 }
 
@@ -3756,13 +3414,13 @@ static int gen8_irq_postinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		ibx_irq_pre_postinstall(dev);
 
 	gen8_gt_irq_postinstall(dev_priv);
 	gen8_de_irq_postinstall(dev_priv);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		ibx_irq_postinstall(dev);
 
 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -3971,7 +3629,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 		new_iir = I915_READ16(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->engine[RCS]);
+			notify_ring(dev_priv->engine[RCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			int plane = pipe;
@@ -4168,7 +3826,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->engine[RCS]);
+			notify_ring(dev_priv->engine[RCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			int plane = pipe;
@@ -4400,9 +4058,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->engine[RCS]);
+			notify_ring(dev_priv->engine[RCS]);
 		if (iir & I915_BSD_USER_INTERRUPT)
-			notify_ring(&dev_priv->engine[VCS]);
+			notify_ring(dev_priv->engine[VCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4487,6 +4145,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
+	if (HAS_GUC_SCHED(dev))
+		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+
 	/* Let's track the enabled rps events */
 	if (IS_VALLEYVIEW(dev_priv))
 		/* WaGsvRC0ResidencyMethod:vlv */
@@ -4508,9 +4169,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (INTEL_INFO(dev_priv)->gen >= 8)
 		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
 
-	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
-			  i915_hangcheck_elapsed);
-
 	if (IS_GEN2(dev_priv)) {
 		/* Gen2 doesn't have a hardware frame counter */
 		dev->max_vblank_count = 0;
@@ -4539,16 +4197,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
-		dev->driver->enable_vblank = valleyview_enable_vblank;
-		dev->driver->disable_vblank = valleyview_disable_vblank;
+		dev->driver->enable_vblank = i965_enable_vblank;
+		dev->driver->disable_vblank = i965_disable_vblank;
 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 	} else if (IS_VALLEYVIEW(dev_priv)) {
 		dev->driver->irq_handler = valleyview_irq_handler;
 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
-		dev->driver->enable_vblank = valleyview_enable_vblank;
-		dev->driver->disable_vblank = valleyview_disable_vblank;
+		dev->driver->enable_vblank = i965_enable_vblank;
+		dev->driver->disable_vblank = i965_disable_vblank;
 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
 		dev->driver->irq_handler = gen8_irq_handler;
@@ -4557,13 +4215,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->irq_uninstall = gen8_irq_uninstall;
 		dev->driver->enable_vblank = gen8_enable_vblank;
 		dev->driver->disable_vblank = gen8_disable_vblank;
-		if (IS_BROXTON(dev))
+		if (IS_BROXTON(dev_priv))
 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
+		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
 		else
 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		dev->driver->irq_handler = ironlake_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_reset;
 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
@@ -4577,21 +4235,25 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
 			dev->driver->irq_handler = i8xx_irq_handler;
 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
+			dev->driver->enable_vblank = i8xx_enable_vblank;
+			dev->driver->disable_vblank = i8xx_disable_vblank;
 		} else if (IS_GEN3(dev_priv)) {
 			dev->driver->irq_preinstall = i915_irq_preinstall;
 			dev->driver->irq_postinstall = i915_irq_postinstall;
 			dev->driver->irq_uninstall = i915_irq_uninstall;
 			dev->driver->irq_handler = i915_irq_handler;
+			dev->driver->enable_vblank = i8xx_enable_vblank;
+			dev->driver->disable_vblank = i8xx_disable_vblank;
 		} else {
 			dev->driver->irq_preinstall = i965_irq_preinstall;
 			dev->driver->irq_postinstall = i965_irq_postinstall;
 			dev->driver->irq_uninstall = i965_irq_uninstall;
 			dev->driver->irq_handler = i965_irq_handler;
+			dev->driver->enable_vblank = i965_enable_vblank;
+			dev->driver->disable_vblank = i965_disable_vblank;
 		}
 		if (I915_HAS_HOTPLUG(dev_priv))
 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-		dev->driver->enable_vblank = i915_enable_vblank;
-		dev->driver->disable_vblank = i915_disable_vblank;
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 768ad89..629e433 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -47,6 +47,7 @@ struct i915_params i915 __read_mostly = {
 	.load_detect_test = 0,
 	.force_reset_modeset_test = 0,
 	.reset = true,
+	.error_capture = true,
 	.invert_brightness = 0,
 	.disable_display = 0,
 	.enable_cmd_parser = 1,
@@ -115,6 +116,14 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
 module_param_named_unsafe(reset, i915.reset, bool, 0600);
 MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+module_param_named(error_capture, i915.error_capture, bool, 0600);
+MODULE_PARM_DESC(error_capture,
+	"Record the GPU state following a hang. "
+	"This information in /sys/class/drm/card<N>/error is vital for "
+	"triaging and debugging hangs.");
+#endif
+
 module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
 MODULE_PARM_DESC(enable_hangcheck,
 	"Periodically check GPU activity for detecting hangs. "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 3a0dd78..94efc89 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -59,6 +59,7 @@ struct i915_params {
 	bool load_detect_test;
 	bool force_reset_modeset_test;
 	bool reset;
+	bool error_capture;
 	bool disable_display;
 	bool verbose_state_checks;
 	bool nuclear_pageflip;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 687c768..2a41950 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -288,7 +288,8 @@ static const struct intel_device_info intel_haswell_info = {
 #define BDW_FEATURES \
 	HSW_FEATURES, \
 	BDW_COLORS, \
-	.has_logical_ring_contexts = 1
+	.has_logical_ring_contexts = 1, \
+	.has_64bit_reloc = 1
 
 static const struct intel_device_info intel_broadwell_info = {
 	BDW_FEATURES,
@@ -308,6 +309,7 @@ static const struct intel_device_info intel_cherryview_info = {
 	.has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.is_cherryview = 1,
+	.has_64bit_reloc = 1,
 	.has_psr = 1,
 	.has_runtime_pm = 1,
 	.has_resource_streamer = 1,
@@ -347,6 +349,7 @@ static const struct intel_device_info intel_broxton_info = {
 	.has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.num_pipes = 3,
+	.has_64bit_reloc = 1,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
@@ -431,9 +434,6 @@ static const struct pci_device_id pciidlist[] = {
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
-extern int i915_driver_load(struct pci_dev *pdev,
-			    const struct pci_device_id *ent);
-
 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct intel_device_info *intel_info =
@@ -463,8 +463,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return i915_driver_load(pdev, ent);
 }
 
-extern void i915_driver_unload(struct drm_device *dev);
-
 static void i915_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -473,8 +471,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
 	drm_dev_unref(dev);
 }
 
-extern const struct dev_pm_ops i915_pm_ops;
-
 static struct pci_driver i915_pci_driver = {
 	.name = DRIVER_NAME,
 	.id_table = pciidlist,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 70d9616..3361d7f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,8 +86,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define DEVEN 0x54
 #define   DEVEN_MCHBAR_EN (1 << 28)
 
-#define BSM 0x5c
-#define   BSM_MASK (0xFFFF << 20)
+/* BSM in include/drm/i915_drm.h */
 
 #define HPLLCC	0xc0 /* 85x only */
 #define   GC_CLOCK_CONTROL_MASK		(0x7 << 0)
@@ -831,96 +830,7 @@ enum skl_disp_power_wells {
 #define  CCK_FREQUENCY_STATUS_SHIFT		8
 #define  CCK_FREQUENCY_VALUES			(0x1f << 0)
 
-/**
- * DOC: DPIO
- *
- * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
- * ports. DPIO is the name given to such a display PHY. These PHYs
- * don't follow the standard programming model using direct MMIO
- * registers, and instead their registers must be accessed trough IOSF
- * sideband. VLV has one such PHY for driving ports B and C, and CHV
- * adds another PHY for driving port D. Each PHY responds to specific
- * IOSF-SB port.
- *
- * Each display PHY is made up of one or two channels. Each channel
- * houses a common lane part which contains the PLL and other common
- * logic. CH0 common lane also contains the IOSF-SB logic for the
- * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
- * must be running when any DPIO registers are accessed.
- *
- * In addition to having their own registers, the PHYs are also
- * controlled through some dedicated signals from the display
- * controller. These include PLL reference clock enable, PLL enable,
- * and CRI clock selection, for example.
- *
- * Eeach channel also has two splines (also called data lanes), and
- * each spline is made up of one Physical Access Coding Sub-Layer
- * (PCS) block and two TX lanes. So each channel has two PCS blocks
- * and four TX lanes. The TX lanes are used as DP lanes or TMDS
- * data/clock pairs depending on the output type.
- *
- * Additionally the PHY also contains an AUX lane with AUX blocks
- * for each channel. This is used for DP AUX communication, but
- * this fact isn't really relevant for the driver since AUX is
- * controlled from the display controller side. No DPIO registers
- * need to be accessed during AUX communication,
- *
- * Generally on VLV/CHV the common lane corresponds to the pipe and
- * the spline (PCS/TX) corresponds to the port.
- *
- * For dual channel PHY (VLV/CHV):
- *
- *  pipe A == CMN/PLL/REF CH0
- *
- *  pipe B == CMN/PLL/REF CH1
- *
- *  port B == PCS/TX CH0
- *
- *  port C == PCS/TX CH1
- *
- * This is especially important when we cross the streams
- * ie. drive port B with pipe B, or port C with pipe A.
- *
- * For single channel PHY (CHV):
- *
- *  pipe C == CMN/PLL/REF CH0
- *
- *  port D == PCS/TX CH0
- *
- * On BXT the entire PHY channel corresponds to the port. That means
- * the PLL is also now associated with the port rather than the pipe,
- * and so the clock needs to be routed to the appropriate transcoder.
- * Port A PLL is directly connected to transcoder EDP and port B/C
- * PLLs can be routed to any transcoder A/B/C.
- *
- * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT). ::
- *
- *
- *     Dual channel PHY (VLV/CHV/BXT)
- *     ---------------------------------
- *     |      CH0      |      CH1      |
- *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
- *     |---------------|---------------| Display PHY
- *     | PCS01 | PCS23 | PCS01 | PCS23 |
- *     |-------|-------|-------|-------|
- *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
- *     ---------------------------------
- *     |     DDI0      |     DDI1      | DP/HDMI ports
- *     ---------------------------------
- *
- *     Single channel PHY (CHV/BXT)
- *     -----------------
- *     |      CH0      |
- *     |  CMN/PLL/REF  |
- *     |---------------| Display PHY
- *     | PCS01 | PCS23 |
- *     |-------|-------|
- *     |TX0|TX1|TX2|TX3|
- *     -----------------
- *     |     DDI2      | DP/HDMI port
- *     -----------------
- */
+/* DPIO registers */
 #define DPIO_DEVFN			0
 
 #define DPIO_CTL			_MMIO(VLV_DISPLAY_BASE + 0x2110)
@@ -1276,7 +1186,19 @@ enum skl_disp_power_wells {
 #define   DPIO_UPAR_SHIFT		30
 
 /* BXT PHY registers */
-#define _BXT_PHY(phy, a, b)		_MMIO_PIPE((phy), (a), (b))
+#define _BXT_PHY0_BASE			0x6C000
+#define _BXT_PHY1_BASE			0x162000
+#define BXT_PHY_BASE(phy)		_PIPE((phy), _BXT_PHY0_BASE, \
+						     _BXT_PHY1_BASE)
+
+#define _BXT_PHY(phy, reg)						\
+	_MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
+
+#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1)		\
+	(BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE,	\
+					 (reg_ch1) - _BXT_PHY0_BASE))
+#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1)		\
+	_MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
 
 #define BXT_P_CR_GT_DISP_PWRON		_MMIO(0x138090)
 #define   GT_DISPLAY_POWER_ON(phy)	(1 << (phy))
@@ -1293,8 +1215,8 @@ enum skl_disp_power_wells {
 #define _PHY_CTL_FAMILY_EDP		0x64C80
 #define _PHY_CTL_FAMILY_DDI		0x64C90
 #define   COMMON_RESET_DIS		(1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy)		_BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
-							_PHY_CTL_FAMILY_EDP)
+#define BXT_PHY_CTL_FAMILY(phy)		_MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
+							  _PHY_CTL_FAMILY_EDP)
 
 /* BXT PHY PLL registers */
 #define _PORT_PLL_A			0x46074
@@ -1314,18 +1236,18 @@ enum skl_disp_power_wells {
 #define   PORT_PLL_P2_SHIFT		8
 #define   PORT_PLL_P2_MASK		(0x1f << PORT_PLL_P2_SHIFT)
 #define   PORT_PLL_P2(x)		((x)  << PORT_PLL_P2_SHIFT)
-#define BXT_PORT_PLL_EBB_0(port)	_MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
-						_PORT_PLL_EBB_0_B,	\
-						_PORT_PLL_EBB_0_C)
+#define BXT_PORT_PLL_EBB_0(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_PLL_EBB_0_B, \
+							 _PORT_PLL_EBB_0_C)
 
 #define _PORT_PLL_EBB_4_A		0x162038
 #define _PORT_PLL_EBB_4_B		0x6C038
 #define _PORT_PLL_EBB_4_C		0x6C344
 #define   PORT_PLL_10BIT_CLK_ENABLE	(1 << 13)
 #define   PORT_PLL_RECALIBRATE		(1 << 14)
-#define BXT_PORT_PLL_EBB_4(port)	_MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
-						_PORT_PLL_EBB_4_B,	\
-						_PORT_PLL_EBB_4_C)
+#define BXT_PORT_PLL_EBB_4(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_PLL_EBB_4_B, \
+							 _PORT_PLL_EBB_4_C)
 
 #define _PORT_PLL_0_A			0x162100
 #define _PORT_PLL_0_B			0x6C100
@@ -1356,57 +1278,56 @@ enum skl_disp_power_wells {
 #define  PORT_PLL_DCO_AMP_DEFAULT	15
 #define  PORT_PLL_DCO_AMP_MASK		0x3c00
 #define  PORT_PLL_DCO_AMP(x)		((x)<<10)
-#define _PORT_PLL_BASE(port)		_PORT3(port, _PORT_PLL_0_A,	\
-						_PORT_PLL_0_B,		\
-						_PORT_PLL_0_C)
-#define BXT_PORT_PLL(port, idx)		_MMIO(_PORT_PLL_BASE(port) + (idx) * 4)
+#define _PORT_PLL_BASE(phy, ch)		_BXT_PHY_CH(phy, ch, \
+						    _PORT_PLL_0_B, \
+						    _PORT_PLL_0_C)
+#define BXT_PORT_PLL(phy, ch, idx)	_MMIO(_PORT_PLL_BASE(phy, ch) + \
+					      (idx) * 4)
 
 /* BXT PHY common lane registers */
 #define _PORT_CL1CM_DW0_A		0x162000
 #define _PORT_CL1CM_DW0_BC		0x6C000
 #define   PHY_POWER_GOOD		(1 << 16)
 #define   PHY_RESERVED			(1 << 7)
-#define BXT_PORT_CL1CM_DW0(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
-							_PORT_CL1CM_DW0_A)
+#define BXT_PORT_CL1CM_DW0(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
 
 #define _PORT_CL1CM_DW9_A		0x162024
 #define _PORT_CL1CM_DW9_BC		0x6C024
 #define   IREF0RC_OFFSET_SHIFT		8
 #define   IREF0RC_OFFSET_MASK		(0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
-							_PORT_CL1CM_DW9_A)
+#define BXT_PORT_CL1CM_DW9(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
 
 #define _PORT_CL1CM_DW10_A		0x162028
 #define _PORT_CL1CM_DW10_BC		0x6C028
 #define   IREF1RC_OFFSET_SHIFT		8
 #define   IREF1RC_OFFSET_MASK		(0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
-							_PORT_CL1CM_DW10_A)
+#define BXT_PORT_CL1CM_DW10(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
 
 #define _PORT_CL1CM_DW28_A		0x162070
 #define _PORT_CL1CM_DW28_BC		0x6C070
 #define   OCL1_POWER_DOWN_EN		(1 << 23)
 #define   DW28_OLDO_DYN_PWR_DOWN_EN	(1 << 22)
 #define   SUS_CLK_CONFIG		0x3
-#define BXT_PORT_CL1CM_DW28(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
-							_PORT_CL1CM_DW28_A)
+#define BXT_PORT_CL1CM_DW28(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
 
 #define _PORT_CL1CM_DW30_A		0x162078
 #define _PORT_CL1CM_DW30_BC		0x6C078
 #define   OCL2_LDOFUSE_PWR_DIS		(1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
-							_PORT_CL1CM_DW30_A)
+#define BXT_PORT_CL1CM_DW30(phy)	_BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
 
-/* Defined for PHY0 only */
-#define BXT_PORT_CL2CM_DW6_BC		_MMIO(0x6C358)
+/* The spec defines this only for BXT PHY0, but let's assume that this
+ * would exist for PHY1 too if it had a second channel.
+ */
+#define _PORT_CL2CM_DW6_A		0x162358
+#define _PORT_CL2CM_DW6_BC		0x6C358
+#define BXT_PORT_CL2CM_DW6(phy)		_BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
 #define   DW6_OLDO_DYN_PWR_DOWN_EN	(1 << 28)
 
 /* BXT PHY Ref registers */
 #define _PORT_REF_DW3_A			0x16218C
 #define _PORT_REF_DW3_BC		0x6C18C
 #define   GRC_DONE			(1 << 22)
-#define BXT_PORT_REF_DW3(phy)		_BXT_PHY((phy), _PORT_REF_DW3_BC, \
-							_PORT_REF_DW3_A)
+#define BXT_PORT_REF_DW3(phy)		_BXT_PHY((phy), _PORT_REF_DW3_BC)
 
 #define _PORT_REF_DW6_A			0x162198
 #define _PORT_REF_DW6_BC		0x6C198
@@ -1417,15 +1338,13 @@ enum skl_disp_power_wells {
 #define   GRC_CODE_SLOW_SHIFT		8
 #define   GRC_CODE_SLOW_MASK		(0xFF << GRC_CODE_SLOW_SHIFT)
 #define   GRC_CODE_NOM_MASK		0xFF
-#define BXT_PORT_REF_DW6(phy)		_BXT_PHY((phy), _PORT_REF_DW6_BC,	\
-						      _PORT_REF_DW6_A)
+#define BXT_PORT_REF_DW6(phy)		_BXT_PHY((phy), _PORT_REF_DW6_BC)
 
 #define _PORT_REF_DW8_A			0x1621A0
 #define _PORT_REF_DW8_BC		0x6C1A0
 #define   GRC_DIS			(1 << 15)
 #define   GRC_RDY_OVRD			(1 << 1)
-#define BXT_PORT_REF_DW8(phy)		_BXT_PHY((phy), _PORT_REF_DW8_BC,	\
-						      _PORT_REF_DW8_A)
+#define BXT_PORT_REF_DW8(phy)		_BXT_PHY((phy), _PORT_REF_DW8_BC)
 
 /* BXT PHY PCS registers */
 #define _PORT_PCS_DW10_LN01_A		0x162428
@@ -1434,12 +1353,13 @@ enum skl_disp_power_wells {
 #define _PORT_PCS_DW10_GRP_A		0x162C28
 #define _PORT_PCS_DW10_GRP_B		0x6CC28
 #define _PORT_PCS_DW10_GRP_C		0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(port)	_MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
-						     _PORT_PCS_DW10_LN01_B, \
-						     _PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(port)	_MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A,  \
-						     _PORT_PCS_DW10_GRP_B,  \
-						     _PORT_PCS_DW10_GRP_C)
+#define BXT_PORT_PCS_DW10_LN01(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_PCS_DW10_LN01_B, \
+							 _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_PCS_DW10_GRP_B, \
+							 _PORT_PCS_DW10_GRP_C)
+
 #define   TX2_SWING_CALC_INIT		(1 << 31)
 #define   TX1_SWING_CALC_INIT		(1 << 30)
 
@@ -1454,15 +1374,15 @@ enum skl_disp_power_wells {
 #define _PORT_PCS_DW12_GRP_C		0x6CE30
 #define   LANESTAGGER_STRAP_OVRD	(1 << 6)
 #define   LANE_STAGGER_MASK		0x1F
-#define BXT_PORT_PCS_DW12_LN01(port)	_MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
-						     _PORT_PCS_DW12_LN01_B, \
-						     _PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(port)	_MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
-						     _PORT_PCS_DW12_LN23_B, \
-						     _PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(port)	_MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
-						     _PORT_PCS_DW12_GRP_B, \
-						     _PORT_PCS_DW12_GRP_C)
+#define BXT_PORT_PCS_DW12_LN01(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_PCS_DW12_LN01_B, \
+							 _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_PCS_DW12_LN23_B, \
+							 _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_PCS_DW12_GRP_B, \
+							 _PORT_PCS_DW12_GRP_C)
 
 /* BXT PHY TX registers */
 #define _BXT_LANE_OFFSET(lane)           (((lane) >> 1) * 0x200 +	\
@@ -1474,12 +1394,12 @@ enum skl_disp_power_wells {
 #define _PORT_TX_DW2_GRP_A		0x162D08
 #define _PORT_TX_DW2_GRP_B		0x6CD08
 #define _PORT_TX_DW2_GRP_C		0x6CF08
-#define BXT_PORT_TX_DW2_GRP(port)	_MMIO_PORT3(port, _PORT_TX_DW2_GRP_A,  \
-						     _PORT_TX_DW2_GRP_B,  \
-						     _PORT_TX_DW2_GRP_C)
-#define BXT_PORT_TX_DW2_LN0(port)	_MMIO_PORT3(port, _PORT_TX_DW2_LN0_A,  \
-						     _PORT_TX_DW2_LN0_B,  \
-						     _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_LN0(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_TX_DW2_LN0_B, \
+							 _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_GRP(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_TX_DW2_GRP_B, \
+							 _PORT_TX_DW2_GRP_C)
 #define   MARGIN_000_SHIFT		16
 #define   MARGIN_000			(0xFF << MARGIN_000_SHIFT)
 #define   UNIQ_TRANS_SCALE_SHIFT	8
@@ -1491,12 +1411,12 @@ enum skl_disp_power_wells {
 #define _PORT_TX_DW3_GRP_A		0x162D0C
 #define _PORT_TX_DW3_GRP_B		0x6CD0C
 #define _PORT_TX_DW3_GRP_C		0x6CF0C
-#define BXT_PORT_TX_DW3_GRP(port)	_MMIO_PORT3(port, _PORT_TX_DW3_GRP_A,  \
-						     _PORT_TX_DW3_GRP_B,  \
-						     _PORT_TX_DW3_GRP_C)
-#define BXT_PORT_TX_DW3_LN0(port)	_MMIO_PORT3(port, _PORT_TX_DW3_LN0_A,  \
-						     _PORT_TX_DW3_LN0_B,  \
-						     _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_LN0(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_TX_DW3_LN0_B, \
+							 _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_GRP(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_TX_DW3_GRP_B, \
+							 _PORT_TX_DW3_GRP_C)
 #define   SCALE_DCOMP_METHOD		(1 << 26)
 #define   UNIQUE_TRANGE_EN_METHOD	(1 << 27)
 
@@ -1506,12 +1426,12 @@ enum skl_disp_power_wells {
 #define _PORT_TX_DW4_GRP_A		0x162D10
 #define _PORT_TX_DW4_GRP_B		0x6CD10
 #define _PORT_TX_DW4_GRP_C		0x6CF10
-#define BXT_PORT_TX_DW4_LN0(port)	_MMIO_PORT3(port, _PORT_TX_DW4_LN0_A,  \
-						     _PORT_TX_DW4_LN0_B,  \
-						     _PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(port)	_MMIO_PORT3(port, _PORT_TX_DW4_GRP_A,  \
-						     _PORT_TX_DW4_GRP_B,  \
-						     _PORT_TX_DW4_GRP_C)
+#define BXT_PORT_TX_DW4_LN0(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_TX_DW4_LN0_B, \
+							 _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(phy, ch)	_MMIO_BXT_PHY_CH(phy, ch, \
+							 _PORT_TX_DW4_GRP_B, \
+							 _PORT_TX_DW4_GRP_C)
 #define   DEEMPH_SHIFT			24
 #define   DE_EMPHASIS			(0xFF << DEEMPH_SHIFT)
 
@@ -1520,10 +1440,10 @@ enum skl_disp_power_wells {
 #define _PORT_TX_DW14_LN0_C		0x6C938
 #define   LATENCY_OPTIM_SHIFT		30
 #define   LATENCY_OPTIM			(1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(port, lane)	_MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A,   \
-							_PORT_TX_DW14_LN0_B,   \
-							_PORT_TX_DW14_LN0_C) + \
-					 _BXT_LANE_OFFSET(lane))
+#define BXT_PORT_TX_DW14_LN(phy, ch, lane)				\
+	_MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B,			\
+				   _PORT_TX_DW14_LN0_C) +		\
+	      _BXT_LANE_OFFSET(lane))
 
 /* UAIMI scratch pad register 1 */
 #define UAIMI_SPR1			_MMIO(0x4F074)
@@ -1605,6 +1525,7 @@ enum skl_disp_power_wells {
 #define RING_HEAD(base)		_MMIO((base)+0x34)
 #define RING_START(base)	_MMIO((base)+0x38)
 #define RING_CTL(base)		_MMIO((base)+0x3c)
+#define   RING_CTL_SIZE(size)	((size) - PAGE_SIZE) /* in bytes -> pages */
 #define RING_SYNC_0(base)	_MMIO((base)+0x40)
 #define RING_SYNC_1(base)	_MMIO((base)+0x44)
 #define RING_SYNC_2(base)	_MMIO((base)+0x48)
@@ -1708,7 +1629,11 @@ enum skl_disp_power_wells {
 #define GEN7_SC_INSTDONE	_MMIO(0x7100)
 #define GEN7_SAMPLER_INSTDONE	_MMIO(0xe160)
 #define GEN7_ROW_INSTDONE	_MMIO(0xe164)
-#define I915_NUM_INSTDONE_REG	4
+#define GEN8_MCR_SELECTOR		_MMIO(0xfdc)
+#define   GEN8_MCR_SLICE(slice)		(((slice) & 3) << 26)
+#define   GEN8_MCR_SLICE_MASK		GEN8_MCR_SLICE(3)
+#define   GEN8_MCR_SUBSLICE(subslice)	(((subslice) & 3) << 24)
+#define   GEN8_MCR_SUBSLICE_MASK	GEN8_MCR_SUBSLICE(3)
 #define RING_IPEIR(base)	_MMIO((base)+0x64)
 #define RING_IPEHR(base)	_MMIO((base)+0x68)
 /*
@@ -2089,9 +2014,9 @@ enum skl_disp_power_wells {
 #define PM_VEBOX_CS_ERROR_INTERRUPT		(1 << 12) /* hsw+ */
 #define PM_VEBOX_USER_INTERRUPT			(1 << 10) /* hsw+ */
 
-#define GT_PARITY_ERROR(dev) \
+#define GT_PARITY_ERROR(dev_priv) \
 	(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
-	 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+	 (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
 
 /* These are all the "old" interrupts */
 #define ILK_BSD_USER_INTERRUPT				(1<<5)
@@ -2184,8 +2109,9 @@ enum skl_disp_power_wells {
 #define FBC_FENCE_OFF		_MMIO(0x3218) /* BSpec typo has 321Bh */
 #define FBC_TAG(i)		_MMIO(0x3300 + (i) * 4)
 
-#define FBC_STATUS2		_MMIO(0x43214)
-#define  FBC_COMPRESSION_MASK	0x7ff
+#define FBC_STATUS2			_MMIO(0x43214)
+#define  IVB_FBC_COMPRESSION_MASK	0x7ff
+#define  BDW_FBC_COMPRESSION_MASK	0xfff
 
 #define FBC_LL_SIZE		(1536)
 
@@ -6011,6 +5937,7 @@ enum {
 #define  GEN8_DE_PIPE_A_IRQ		(1<<16)
 #define  GEN8_DE_PIPE_IRQ(pipe)		(1<<(16+(pipe)))
 #define  GEN8_GT_VECS_IRQ		(1<<6)
+#define  GEN8_GT_GUC_IRQ		(1<<5)
 #define  GEN8_GT_PM_IRQ			(1<<4)
 #define  GEN8_GT_VCS2_IRQ		(1<<3)
 #define  GEN8_GT_VCS1_IRQ		(1<<2)
@@ -6022,6 +5949,16 @@ enum {
 #define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
 #define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
 
+#define GEN9_GUC_TO_HOST_INT_EVENT	(1<<31)
+#define GEN9_GUC_EXEC_ERROR_EVENT	(1<<30)
+#define GEN9_GUC_DISPLAY_EVENT		(1<<29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT	(1<<28)
+#define GEN9_GUC_IOMMU_MSG_EVENT	(1<<27)
+#define GEN9_GUC_DB_RING_EVENT		(1<<26)
+#define GEN9_GUC_DMA_DONE_EVENT		(1<<25)
+#define GEN9_GUC_FATAL_ERROR_EVENT	(1<<24)
+#define GEN9_GUC_NOTIFICATION_EVENT	(1<<23)
+
 #define GEN8_RCS_IRQ_SHIFT 0
 #define GEN8_BCS_IRQ_SHIFT 16
 #define GEN8_VCS1_IRQ_SHIFT 0
@@ -7327,6 +7264,10 @@ enum {
 #define   AUD_CONFIG_UPPER_N_MASK		(0xff << 20)
 #define   AUD_CONFIG_LOWER_N_SHIFT		4
 #define   AUD_CONFIG_LOWER_N_MASK		(0xfff << 4)
+#define   AUD_CONFIG_N_MASK			(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
+#define   AUD_CONFIG_N(n) \
+	(((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) |	\
+	 (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT	16
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK	(0xf << 16)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25175	(0 << 16)
@@ -7350,6 +7291,13 @@ enum {
 #define _HSW_AUD_MISC_CTRL_B		0x65110
 #define HSW_AUD_MISC_CTRL(pipe)		_MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
 
+#define _HSW_AUD_M_CTS_ENABLE_A		0x65028
+#define _HSW_AUD_M_CTS_ENABLE_B		0x65128
+#define HSW_AUD_M_CTS_ENABLE(pipe)	_MMIO_PIPE(pipe, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define   AUD_M_CTS_M_VALUE_INDEX	(1 << 21)
+#define   AUD_M_CTS_M_PROG_ENABLE	(1 << 20)
+#define   AUD_CONFIG_M_MASK		0xfffff
+
 #define _HSW_AUD_DIP_ELD_CTRL_ST_A	0x650b4
 #define _HSW_AUD_DIP_ELD_CTRL_ST_B	0x651b4
 #define HSW_AUD_DIP_ELD_CTRL(pipe)	_MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
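
The new _BXT_PHY()/_BXT_PHY_CH() helpers above express every register as a
PHY0-relative offset and rebase it per PHY and channel. A worked check of the
arithmetic, assuming the usual _PIPE(x, a, b) expansion of
(a) + (x) * ((b) - (a)):

	/* BXT_PHY_BASE(0) == 0x6C000, BXT_PHY_BASE(1) == 0x162000 */

	/* PHY0 passes the offset through unchanged: */
	_BXT_PHY(0, _PORT_REF_DW3_BC);
		/* 0x6C000 - 0x6C000 + 0x6C18C == 0x6C18C */

	/* PHY1 rebases it by +0xF6000, landing on the old "_A" address: */
	_BXT_PHY(1, _PORT_REF_DW3_BC);
		/* 0x162000 - 0x6C000 + 0x6C18C == 0x16218C (== _PORT_REF_DW3_A) */

	/* Channel selection works the same way within a PHY: */
	BXT_PORT_PLL_EBB_4(0, 1);
		/* 0x6C000 + (0x6C344 - 0x6C000) == 0x6C344 (== _PORT_PLL_EBB_4_C) */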
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a0af170..344cbf3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -38,7 +38,7 @@ static void i915_save_display(struct drm_device *dev)
 		dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
 
 	/* save FBC interval */
-	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
 		dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
 }
 
@@ -54,7 +54,7 @@ static void i915_restore_display(struct drm_device *dev)
 	intel_fbc_global_disable(dev_priv);
 
 	/* restore FBC interval */
-	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
 		I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
 
 	i915_redisable_vga(dev);
@@ -70,7 +70,7 @@ int i915_save_state(struct drm_device *dev)
 
 	i915_save_display(dev);
 
-	if (IS_GEN4(dev))
+	if (IS_GEN4(dev_priv))
 		pci_read_config_word(pdev, GCDGMBUS,
 				     &dev_priv->regfile.saveGCDGMBUS);
 
@@ -116,7 +116,7 @@ int i915_restore_state(struct drm_device *dev)
 
 	i915_gem_restore_fences(dev);
 
-	if (IS_GEN4(dev))
+	if (IS_GEN4(dev_priv))
 		pci_write_config_word(pdev, GCDGMBUS,
 				      dev_priv->regfile.saveGCDGMBUS);
 	i915_restore_display(dev);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1e5cbc5..95f2f12 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -8,11 +8,13 @@
  */
 
 #include <linux/slab.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/reservation.h>
 
 #include "i915_sw_fence.h"
 
+#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
+
 static DEFINE_SPINLOCK(i915_sw_fence_lock);
 
 static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
@@ -135,6 +137,8 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
 	list_del(&wq->task_list);
 	__i915_sw_fence_complete(wq->private, key);
 	i915_sw_fence_put(wq->private);
+	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
+		kfree(wq);
 	return 0;
 }
 
@@ -192,9 +196,9 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 	return err;
 }
 
-int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
-				 struct i915_sw_fence *signaler,
-				 wait_queue_t *wq)
+static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+					  struct i915_sw_fence *signaler,
+					  wait_queue_t *wq, gfp_t gfp)
 {
 	unsigned long flags;
 	int pending;
@@ -206,8 +210,22 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
 		return -EINVAL;
 
+	pending = 0;
+	if (!wq) {
+		wq = kmalloc(sizeof(*wq), gfp);
+		if (!wq) {
+			if (!gfpflags_allow_blocking(gfp))
+				return -ENOMEM;
+
+			i915_sw_fence_wait(signaler);
+			return 0;
+		}
+
+		pending |= I915_SW_FENCE_FLAG_ALLOC;
+	}
+
 	INIT_LIST_HEAD(&wq->task_list);
-	wq->flags = 0;
+	wq->flags = pending;
 	wq->func = i915_sw_fence_wake;
 	wq->private = i915_sw_fence_get(fence);
 
@@ -226,49 +244,64 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 	return pending;
 }
 
-struct dma_fence_cb {
-	struct fence_cb base;
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+				 struct i915_sw_fence *signaler,
+				 wait_queue_t *wq)
+{
+	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
+}
+
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+				     struct i915_sw_fence *signaler,
+				     gfp_t gfp)
+{
+	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
+}
+
+struct i915_sw_dma_fence_cb {
+	struct dma_fence_cb base;
 	struct i915_sw_fence *fence;
-	struct fence *dma;
+	struct dma_fence *dma;
 	struct timer_list timer;
 };
 
 static void timer_i915_sw_fence_wake(unsigned long data)
 {
-	struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+	struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
 
 	printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
 	       cb->dma->ops->get_driver_name(cb->dma),
 	       cb->dma->ops->get_timeline_name(cb->dma),
 	       cb->dma->seqno);
-	fence_put(cb->dma);
+	dma_fence_put(cb->dma);
 	cb->dma = NULL;
 
 	i915_sw_fence_commit(cb->fence);
 	cb->timer.function = NULL;
 }
 
-static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+				   struct dma_fence_cb *data)
 {
-	struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
 
 	del_timer_sync(&cb->timer);
 	if (cb->timer.function)
 		i915_sw_fence_commit(cb->fence);
-	fence_put(cb->dma);
+	dma_fence_put(cb->dma);
 
 	kfree(cb);
 }
 
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
-				  struct fence *dma,
+				  struct dma_fence *dma,
 				  unsigned long timeout,
 				  gfp_t gfp)
 {
-	struct dma_fence_cb *cb;
+	struct i915_sw_dma_fence_cb *cb;
 	int ret;
 
-	if (fence_is_signaled(dma))
+	if (dma_fence_is_signaled(dma))
 		return 0;
 
 	cb = kmalloc(sizeof(*cb), gfp);
@@ -276,7 +309,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 		if (!gfpflags_allow_blocking(gfp))
 			return -ENOMEM;
 
-		return fence_wait(dma, false);
+		return dma_fence_wait(dma, false);
 	}
 
 	cb->fence = i915_sw_fence_get(fence);
@@ -287,11 +320,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 		      timer_i915_sw_fence_wake, (unsigned long)cb,
 		      TIMER_IRQSAFE);
 	if (timeout) {
-		cb->dma = fence_get(dma);
+		cb->dma = dma_fence_get(dma);
 		mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
 	}
 
-	ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+	ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
 	if (ret == 0) {
 		ret = 1;
 	} else {
@@ -305,16 +338,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    struct reservation_object *resv,
-				    const struct fence_ops *exclude,
+				    const struct dma_fence_ops *exclude,
 				    bool write,
 				    unsigned long timeout,
 				    gfp_t gfp)
 {
-	struct fence *excl;
+	struct dma_fence *excl;
 	int ret = 0, pending;
 
 	if (write) {
-		struct fence **shared;
+		struct dma_fence **shared;
 		unsigned int count, i;
 
 		ret = reservation_object_get_fences_rcu(resv,
@@ -339,7 +372,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		}
 
 		for (i = 0; i < count; i++)
-			fence_put(shared[i]);
+			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
 		excl = reservation_object_get_excl_rcu(resv);
@@ -356,7 +389,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 			ret |= pending;
 	}
 
-	fence_put(excl);
+	dma_fence_put(excl);
 
 	return ret;
 }
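
A hypothetical caller of the converted dma_fence path, arming the warning
timer this file uses; the fence pointers and the 10s timeout are illustrative,
not taken from this patch:

	err = i915_sw_fence_await_dma_fence(&fence, dma, 10 * HZ, GFP_KERNEL);
	if (err < 0)
		return err;	/* hard failure, e.g. -ENOMEM with an atomic gfp */
	/* err == 0: @dma already signaled; err > 0: callback installed */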
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 3731416..707dfc4 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,8 +16,8 @@
 #include <linux/wait.h>
 
 struct completion;
-struct fence;
-struct fence_ops;
+struct dma_fence;
+struct dma_fence_ops;
 struct reservation_object;
 
 struct i915_sw_fence {
@@ -46,13 +46,16 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *after,
 				 wait_queue_t *wq);
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+				     struct i915_sw_fence *after,
+				     gfp_t gfp);
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
-				  struct fence *dma,
+				  struct dma_fence *dma,
 				  unsigned long timeout,
 				  gfp_t gfp);
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    struct reservation_object *resv,
-				    const struct fence_ops *exclude,
+				    const struct dma_fence_ops *exclude,
 				    bool write,
 				    unsigned long timeout,
 				    gfp_t gfp);
@@ -62,4 +65,9 @@ static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
 	return atomic_read(&fence->pending) < 0;
 }
 
+static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
+{
+	wait_event(fence->wait, i915_sw_fence_done(fence));
+}
+
 #endif /* _I915_SW_FENCE_H_ */
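
The new _gfp variant lets callers skip preallocating a wait_queue_t; a sketch
with assumed fence names:

	/* Order @to after @from without a caller-provided wait entry.
	 * If the internal allocation fails and @gfp allows blocking, the
	 * helper falls back to i915_sw_fence_wait(@from), so the
	 * dependency is never silently dropped.
	 */
	pending = i915_sw_fence_await_sw_fence_gfp(to, from, GFP_KERNEL);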
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1012eee..47590ab 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -514,6 +514,8 @@ static const struct attribute *vlv_attrs[] = {
 	NULL,
 };
 
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
 static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 				struct bin_attribute *attr, char *buf,
 				loff_t off, size_t count)
@@ -571,6 +573,21 @@ static struct bin_attribute error_state_attr = {
 	.write = error_state_write,
 };
 
+static void i915_setup_error_capture(struct device *kdev)
+{
+	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+		DRM_ERROR("error_state sysfs setup failed\n");
+}
+
+static void i915_teardown_error_capture(struct device *kdev)
+{
+	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
+#else
+static void i915_setup_error_capture(struct device *kdev) {}
+static void i915_teardown_error_capture(struct device *kdev) {}
+#endif
+
 void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 {
 	struct device *kdev = dev_priv->drm.primary->kdev;
@@ -617,17 +634,15 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 	if (ret)
 		DRM_ERROR("RPS sysfs setup failed\n");
 
-	ret = sysfs_create_bin_file(&kdev->kobj,
-				    &error_state_attr);
-	if (ret)
-		DRM_ERROR("error_state sysfs setup failed\n");
+	i915_setup_error_capture(kdev);
 }
 
 void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 {
 	struct device *kdev = dev_priv->drm.primary->kdev;
 
-	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+	i915_teardown_error_capture(kdev);
+
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		sysfs_remove_files(&kdev->kobj, vlv_attrs);
 	else
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 1787980..c5d210e 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -466,7 +466,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 			   __entry->dev = from->i915->drm.primary->index;
 			   __entry->sync_from = from->engine->id;
 			   __entry->sync_to = to->engine->id;
-			   __entry->seqno = from->fence.seqno;
+			   __entry->seqno = from->global_seqno;
 			   ),
 
 	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -489,9 +489,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   __entry->flags = flags;
-			   fence_enable_sw_signaling(&req->fence);
+			   dma_fence_enable_sw_signaling(&req->fence);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -596,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   __entry->blocking =
 				     mutex_is_locked(&req->i915->drm.struct_mutex);
 			   ),
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index b82de30..cb55944 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -84,7 +84,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 	state = &intel_state->base;
 
 	__drm_atomic_helper_plane_duplicate_state(plane, state);
-	intel_state->wait_req = NULL;
 
 	return state;
 }
@@ -101,7 +100,6 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
 			  struct drm_plane_state *state)
 {
-	WARN_ON(state && to_intel_plane_state(state)->wait_req);
 	drm_atomic_helper_plane_destroy_state(plane, state);
 }
 
@@ -142,8 +140,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 	intel_state->clip.y2 =
 		crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
 
-	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+	if (state->fb && drm_rotation_90_or_270(state->rotation)) {
 		char *format_name;
+
 		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 			state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
 			DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6c70a5b..1c509f741 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -57,6 +57,63 @@
  * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
  */
 
+/* DP N/M table */
+#define LC_540M	540000
+#define LC_270M	270000
+#define LC_162M	162000
+
+struct dp_aud_n_m {
+	int sample_rate;
+	int clock;
+	u16 m;
+	u16 n;
+};
+
+/* Values according to DP 1.4 Table 2-104 */
+static const struct dp_aud_n_m dp_aud_n_m[] = {
+	{ 32000, LC_162M, 1024, 10125 },
+	{ 44100, LC_162M, 784, 5625 },
+	{ 48000, LC_162M, 512, 3375 },
+	{ 64000, LC_162M, 2048, 10125 },
+	{ 88200, LC_162M, 1568, 5625 },
+	{ 96000, LC_162M, 1024, 3375 },
+	{ 128000, LC_162M, 4096, 10125 },
+	{ 176400, LC_162M, 3136, 5625 },
+	{ 192000, LC_162M, 2048, 3375 },
+	{ 32000, LC_270M, 1024, 16875 },
+	{ 44100, LC_270M, 784, 9375 },
+	{ 48000, LC_270M, 512, 5625 },
+	{ 64000, LC_270M, 2048, 16875 },
+	{ 88200, LC_270M, 1568, 9375 },
+	{ 96000, LC_270M, 1024, 5625 },
+	{ 128000, LC_270M, 4096, 16875 },
+	{ 176400, LC_270M, 3136, 9375 },
+	{ 192000, LC_270M, 2048, 5625 },
+	{ 32000, LC_540M, 1024, 33750 },
+	{ 44100, LC_540M, 784, 18750 },
+	{ 48000, LC_540M, 512, 11250 },
+	{ 64000, LC_540M, 2048, 33750 },
+	{ 88200, LC_540M, 1568, 18750 },
+	{ 96000, LC_540M, 1024, 11250 },
+	{ 128000, LC_540M, 4096, 33750 },
+	{ 176400, LC_540M, 3136, 18750 },
+	{ 192000, LC_540M, 2048, 11250 },
+};
+
+static const struct dp_aud_n_m *
+audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+		if (rate == dp_aud_n_m[i].sample_rate &&
+		    intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
+			return &dp_aud_n_m[i];
+	}
+
+	return NULL;
+}
+
 static const struct {
 	int clock;
 	u32 config;
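
The table entries satisfy the DP audio relation Maud/Naud = 512 * fs /
f_LS_Clk; one row checked by hand:

	/* Worked check for { 48000, LC_162M, 512, 3375 }:
	 *   Maud / Naud      == 512 * fs / f_LS_Clk
	 *   512  / 3375      == 512 * 48000 / 162000000   (both ~0.15170)
	 * i.e. m * f_LS_Clk  == 512 * fs * n exactly:
	 *   512 * 162000000  == 512 * 48000 * 3375 == 82944000000
	 */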
@@ -81,7 +138,7 @@ static const struct {
 	int clock;
 	int n;
 	int cts;
-} aud_ncts[] = {
+} hdmi_aud_ncts[] = {
 	{ 44100, TMDS_296M, 4459, 234375 },
 	{ 44100, TMDS_297M, 4704, 247500 },
 	{ 48000, TMDS_296M, 5824, 281250 },
@@ -121,45 +178,20 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
 	return hdmi_audio_clock[i].config;
 }
 
-static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
+static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
+				   int rate)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
-		if ((rate == aud_ncts[i].sample_rate) &&
-			(mode->clock == aud_ncts[i].clock)) {
-			return aud_ncts[i].n;
+	for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
+		if (rate == hdmi_aud_ncts[i].sample_rate &&
+		    adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
+			return hdmi_aud_ncts[i].n;
 		}
 	}
 	return 0;
 }
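
For HDMI the analogous relation is 128 * fs = f_TMDS * N / CTS, so a
chosen N implies CTS = f_TMDS * N / (128 * fs). A hypothetical helper,
not part of this patch, taking TMDS_296M as 296703 kHz (297 MHz /
1.001) as in the table above:

	static unsigned int hdmi_cts_for_n(unsigned long long tmds_hz,
					   unsigned int n, unsigned int fs)
	{
		return (unsigned int)(tmds_hz * n / (128ULL * fs));
	}

	/* hdmi_cts_for_n(296703000ULL, 5824, 48000) == 281249, i.e.
	 * ~281250 modulo rounding, matching the hdmi_aud_ncts[] entry
	 * { 48000, TMDS_296M, 5824, 281250 } above. */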
 
-static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
-{
-	int n_low, n_up;
-	uint32_t tmp = val;
-
-	n_low = n & 0xfff;
-	n_up = (n >> 12) & 0xff;
-	tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
-	tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
-			(n_low << AUD_CONFIG_LOWER_N_SHIFT) |
-			AUD_CONFIG_N_PROG_ENABLE);
-	return tmp;
-}
-
-/* check whether N/CTS/M need be set manually */
-static bool audio_rate_need_prog(struct intel_crtc *crtc,
-				 const struct drm_display_mode *mode)
-{
-	if (((mode->clock == TMDS_297M) ||
-		 (mode->clock == TMDS_296M)) &&
-		intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
-		return true;
-	else
-		return false;
-}
-
 static bool intel_eld_uptodate(struct drm_connector *connector,
 			       i915_reg_t reg_eldv, uint32_t bits_eldv,
 			       i915_reg_t reg_elda, uint32_t bits_elda,
@@ -245,6 +277,97 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
 	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
 }
 
+static void
+hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+			   const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+	const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 tmp;
+
+	if (nm)
+		DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+	else
+		DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+
+	tmp = I915_READ(HSW_AUD_CFG(pipe));
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+	tmp |= AUD_CONFIG_N_VALUE_INDEX;
+
+	if (nm) {
+		tmp &= ~AUD_CONFIG_N_MASK;
+		tmp |= AUD_CONFIG_N(nm->n);
+		tmp |= AUD_CONFIG_N_PROG_ENABLE;
+	}
+
+	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+	tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+	tmp &= ~AUD_CONFIG_M_MASK;
+	tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+	tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+
+	if (nm) {
+		tmp |= nm->m;
+		tmp |= AUD_M_CTS_M_VALUE_INDEX;
+		tmp |= AUD_M_CTS_M_PROG_ENABLE;
+	}
+
+	I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+}
+
+static void
+hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+			     const struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+	enum pipe pipe = intel_crtc->pipe;
+	int n;
+	u32 tmp;
+
+	tmp = I915_READ(HSW_AUD_CFG(pipe));
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+	tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+
+	n = audio_config_hdmi_get_n(adjusted_mode, rate);
+	if (n != 0) {
+		DRM_DEBUG_KMS("using N %d\n", n);
+
+		tmp &= ~AUD_CONFIG_N_MASK;
+		tmp |= AUD_CONFIG_N(n);
+		tmp |= AUD_CONFIG_N_PROG_ENABLE;
+	} else {
+		DRM_DEBUG_KMS("using automatic N\n");
+	}
+
+	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+	tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+	tmp &= ~AUD_CONFIG_M_MASK;
+	tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+	tmp |= AUD_M_CTS_M_PROG_ENABLE;
+	I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+}
+
+static void
+hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+			const struct drm_display_mode *adjusted_mode)
+{
+	if (intel_crtc_has_dp_encoder(intel_crtc->config))
+		hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
+	else
+		hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
+}
+
 static void hsw_audio_codec_disable(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -276,20 +399,16 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
 }
 
 static void hsw_audio_codec_enable(struct drm_connector *connector,
-				   struct intel_encoder *encoder,
+				   struct intel_encoder *intel_encoder,
 				   const struct drm_display_mode *adjusted_mode)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	struct i915_audio_component *acomp = dev_priv->audio_component;
+	enum port port = intel_encoder->port;
 	const uint8_t *eld = connector->eld;
-	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
-	enum port port = intel_dig_port->port;
 	uint32_t tmp;
 	int len, i;
-	int n, rate;
 
 	DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
 		      pipe_name(pipe), drm_eld_size(eld));
@@ -325,42 +444,17 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
 	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
 
 	/* Enable timestamps */
-	tmp = I915_READ(HSW_AUD_CFG(pipe));
-	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
-	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-	if (intel_crtc_has_dp_encoder(intel_crtc->config))
-		tmp |= AUD_CONFIG_N_VALUE_INDEX;
-	else
-		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
-
-	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
-	if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
-		if (!acomp)
-			rate = 0;
-		else if (port >= PORT_A && port <= PORT_E)
-			rate = acomp->aud_sample_rate[port];
-		else {
-			DRM_ERROR("invalid port: %d\n", port);
-			rate = 0;
-		}
-		n = audio_config_get_n(adjusted_mode, rate);
-		if (n != 0)
-			tmp = audio_config_setup_n_reg(n, tmp);
-		else
-			DRM_DEBUG_KMS("no suitable N value is found\n");
-	}
-
-	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+	hsw_audio_config_update(intel_crtc, port, adjusted_mode);
 
 	mutex_unlock(&dev_priv->av_mutex);
 }
 
-static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	enum port port = enc_to_dig_port(&encoder->base)->port;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
 	enum pipe pipe = intel_crtc->pipe;
+	enum port port = intel_encoder->port;
 	uint32_t tmp, eldv;
 	i915_reg_t aud_config, aud_cntrl_st2;
 
@@ -400,13 +494,13 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 }
 
 static void ilk_audio_codec_enable(struct drm_connector *connector,
-				   struct intel_encoder *encoder,
+				   struct intel_encoder *intel_encoder,
 				   const struct drm_display_mode *adjusted_mode)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	enum port port = enc_to_dig_port(&encoder->base)->port;
+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
 	enum pipe pipe = intel_crtc->pipe;
+	enum port port = intel_encoder->port;
 	uint8_t *eld = connector->eld;
 	uint32_t tmp, eldv;
 	int len, i;
@@ -425,13 +519,13 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 	 * infrastructure is not there yet.
 	 */
 
-	if (HAS_PCH_IBX(connector->dev)) {
+	if (HAS_PCH_IBX(dev_priv)) {
 		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
 		aud_config = IBX_AUD_CFG(pipe);
 		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
-	} else if (IS_VALLEYVIEW(connector->dev) ||
-		   IS_CHERRYVIEW(connector->dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) ||
+		   IS_CHERRYVIEW(dev_priv)) {
 		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
 		aud_config = VLV_AUD_CFG(pipe);
 		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@@ -480,24 +574,26 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 /**
  * intel_audio_codec_enable - Enable the audio codec for HD audio
  * @intel_encoder: encoder on which to enable audio
+ * @crtc_state: pointer to the current crtc state.
+ * @conn_state: pointer to the current connector state.
  *
  * The enable sequences may only be performed after enabling the transcoder and
  * port, and after completed link training.
  */
-void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+			      const struct intel_crtc_state *crtc_state,
+			      const struct drm_connector_state *conn_state)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
 	struct drm_connector *connector;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct i915_audio_component *acomp = dev_priv->audio_component;
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	enum port port = intel_dig_port->port;
+	enum port port = intel_encoder->port;
+	enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
 
-	connector = drm_select_eld(encoder);
-	if (!connector)
+	connector = conn_state->connector;
+	if (!connector || !connector->eld[0])
 		return;
 
 	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -508,7 +604,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 
 	/* ELD Conn_Type */
 	connector->eld[5] &= ~(3 << 2);
-	if (intel_crtc_has_dp_encoder(crtc->config))
+	if (intel_crtc_has_dp_encoder(crtc_state))
 		connector->eld[5] |= (1 << 2);
 
 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -518,13 +614,19 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 						     adjusted_mode);
 
 	mutex_lock(&dev_priv->av_mutex);
-	intel_dig_port->audio_connector = connector;
+	intel_encoder->audio_connector = connector;
+
 	/* referred in audio callbacks */
-	dev_priv->dig_port_map[port] = intel_encoder;
+	dev_priv->av_enc_map[pipe] = intel_encoder;
 	mutex_unlock(&dev_priv->av_mutex);
 
+	/* audio drivers expect pipe = -1 to indicate Non-MST cases */
+	if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+		pipe = -1;
+
 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
-		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+						 (int) port, (int) pipe);
 }
 
 /**
@@ -537,22 +639,27 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct i915_audio_component *acomp = dev_priv->audio_component;
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	enum port port = intel_dig_port->port;
+	enum port port = intel_encoder->port;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = crtc->pipe;
 
 	if (dev_priv->display.audio_codec_disable)
 		dev_priv->display.audio_codec_disable(intel_encoder);
 
 	mutex_lock(&dev_priv->av_mutex);
-	intel_dig_port->audio_connector = NULL;
-	dev_priv->dig_port_map[port] = NULL;
+	intel_encoder->audio_connector = NULL;
+	dev_priv->av_enc_map[pipe] = NULL;
 	mutex_unlock(&dev_priv->av_mutex);
 
+	/* audio drivers expect pipe = -1 to indicate Non-MST cases */
+	if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+		pipe = -1;
+
 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
-		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+						 (int) port, (int) pipe);
 }
 
 /**
@@ -627,74 +734,68 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
 	return dev_priv->cdclk_freq;
 }
 
-static int i915_audio_component_sync_audio_rate(struct device *kdev,
-						int port, int rate)
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+					       int port, int pipe)
+{
+	if (WARN_ON(pipe >= I915_MAX_PIPES))
+		return NULL;
+
+	/* MST */
+	if (pipe >= 0)
+		return dev_priv->av_enc_map[pipe];
+
+	/* Non-MST */
+	for_each_pipe(dev_priv, pipe) {
+		struct intel_encoder *encoder;
+
+		encoder = dev_priv->av_enc_map[pipe];
+		if (encoder == NULL)
+			continue;
+
+		if (port == encoder->port)
+			return encoder;
+	}
+
+	return NULL;
+}
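
The two lookup modes this helper provides, assuming av_enc_map[] was
populated by intel_audio_codec_enable() (illustrative calls only):

	/* MST: the audio driver passes an explicit pipe, index directly */
	encoder = get_saved_enc(dev_priv, PORT_B, 1);

	/* Non-MST: the audio driver passes pipe = -1, scan by port */
	encoder = get_saved_enc(dev_priv, PORT_B, -1);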
+
+static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
+						int pipe, int rate)
 {
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_encoder *intel_encoder;
 	struct intel_crtc *crtc;
-	struct drm_display_mode *mode;
+	struct drm_display_mode *adjusted_mode;
 	struct i915_audio_component *acomp = dev_priv->audio_component;
-	enum pipe pipe = INVALID_PIPE;
-	u32 tmp;
-	int n;
 	int err = 0;
 
-	/* HSW, BDW, SKL, KBL need this fix */
-	if (!IS_SKYLAKE(dev_priv) &&
-	    !IS_KABYLAKE(dev_priv) &&
-	    !IS_BROADWELL(dev_priv) &&
-	    !IS_HASWELL(dev_priv))
+	if (!HAS_DDI(dev_priv))
 		return 0;
 
 	i915_audio_component_get_power(kdev);
 	mutex_lock(&dev_priv->av_mutex);
+
 	/* 1. get the pipe */
-	intel_encoder = dev_priv->dig_port_map[port];
-	/* intel_encoder might be NULL for DP MST */
+	intel_encoder = get_saved_enc(dev_priv, port, pipe);
 	if (!intel_encoder || !intel_encoder->base.crtc ||
-	    intel_encoder->type != INTEL_OUTPUT_HDMI) {
-		DRM_DEBUG_KMS("no valid port %c\n", port_name(port));
-		err = -ENODEV;
-		goto unlock;
-	}
-	crtc = to_intel_crtc(intel_encoder->base.crtc);
-	pipe = crtc->pipe;
-	if (pipe == INVALID_PIPE) {
-		DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
+	    (intel_encoder->type != INTEL_OUTPUT_HDMI &&
+	     intel_encoder->type != INTEL_OUTPUT_DP)) {
+		DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
 		err = -ENODEV;
 		goto unlock;
 	}
 
-	DRM_DEBUG_KMS("pipe %c connects port %c\n",
-				  pipe_name(pipe), port_name(port));
-	mode = &crtc->config->base.adjusted_mode;
+	/* pipe passed from the audio driver will be -1 for Non-MST case */
+	crtc = to_intel_crtc(intel_encoder->base.crtc);
+	pipe = crtc->pipe;
+
+	adjusted_mode = &crtc->config->base.adjusted_mode;
 
 	/* port must be valid now, otherwise the pipe will be invalid */
 	acomp->aud_sample_rate[port] = rate;
 
-	/* 2. check whether to set the N/CTS/M manually or not */
-	if (!audio_rate_need_prog(crtc, mode)) {
-		tmp = I915_READ(HSW_AUD_CFG(pipe));
-		tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
-		I915_WRITE(HSW_AUD_CFG(pipe), tmp);
-		goto unlock;
-	}
-
-	n = audio_config_get_n(mode, rate);
-	if (n == 0) {
-		DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
-					  port_name(port));
-		tmp = I915_READ(HSW_AUD_CFG(pipe));
-		tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
-		I915_WRITE(HSW_AUD_CFG(pipe), tmp);
-		goto unlock;
-	}
-
-	/* 3. set the N/CTS/M */
-	tmp = I915_READ(HSW_AUD_CFG(pipe));
-	tmp = audio_config_setup_n_reg(n, tmp);
-	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+	hsw_audio_config_update(crtc, port, adjusted_mode);
 
  unlock:
 	mutex_unlock(&dev_priv->av_mutex);
@@ -703,27 +804,29 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev,
 }
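
On the audio-driver side the matching expectation is that the component
op is now called with the extra pipe argument; a sketch, assuming the
i915_audio_component_ops shape this patch converts to:

	/* non-MST: pipe = -1, i915 resolves the pipe from the port */
	if (acomp->ops->sync_audio_rate)
		acomp->ops->sync_audio_rate(acomp->dev, port, -1, rate);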
 
 static int i915_audio_component_get_eld(struct device *kdev, int port,
-					bool *enabled,
+					int pipe, bool *enabled,
 					unsigned char *buf, int max_bytes)
 {
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_encoder *intel_encoder;
-	struct intel_digital_port *intel_dig_port;
 	const u8 *eld;
 	int ret = -EINVAL;
 
 	mutex_lock(&dev_priv->av_mutex);
-	intel_encoder = dev_priv->dig_port_map[port];
-	/* intel_encoder might be NULL for DP MST */
-	if (intel_encoder) {
-		ret = 0;
-		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-		*enabled = intel_dig_port->audio_connector != NULL;
-		if (*enabled) {
-			eld = intel_dig_port->audio_connector->eld;
-			ret = drm_eld_size(eld);
-			memcpy(buf, eld, min(max_bytes, ret));
-		}
+
+	intel_encoder = get_saved_enc(dev_priv, port, pipe);
+	if (!intel_encoder) {
+		DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+		mutex_unlock(&dev_priv->av_mutex);
+		return ret;
+	}
+
+	ret = 0;
+	*enabled = intel_encoder->audio_connector != NULL;
+	if (*enabled) {
+		eld = intel_encoder->audio_connector->eld;
+		ret = drm_eld_size(eld);
+		memcpy(buf, eld, min(max_bytes, ret));
 	}
 
 	mutex_unlock(&dev_priv->av_mutex);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c6e69e4..5ab646e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -996,6 +996,10 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
 			goto err;
 		}
 
+		/* Log about presence of sequences we won't run. */
+		if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
+			DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+
 		dev_priv->vbt.dsi.sequence[seq_id] = data + index;
 
 		if (sequence->version >= 3)
@@ -1031,6 +1035,77 @@ static u8 translate_iboost(u8 val)
 	return mapping[val];
 }
 
+static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+			     enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	enum port p;
+
+	if (!info->alternate_ddc_pin)
+		return;
+
+	for_each_port_masked(p, (1 << port) - 1) {
+		struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+		if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+			continue;
+
+		DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
+			      "disabling port %c DVI/HDMI support\n",
+			      port_name(p), i->alternate_ddc_pin,
+			      port_name(port), port_name(p));
+
+		/*
+		 * If we have multiple ports supposedly sharing the
+		 * pin, then DVI/HDMI couldn't exist on the shared
+		 * port. Otherwise they share the same DDC pin and the
+		 * system couldn't communicate with them separately.
+		 *
+		 * Due to parsing the ports in alphabetical order,
+		 * a higher port will always clobber a lower one.
+		 */
+		i->supports_dvi = false;
+		i->supports_hdmi = false;
+		i->alternate_ddc_pin = 0;
+	}
+}
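
The (1 << port) - 1 mask confines the walk to ports parsed earlier in
the alphabet, which is what makes "a higher port clobbers a lower one"
deterministic. A small illustration, not part of the patch:

	enum port p;

	/* port == PORT_C (2): mask = (1 << 2) - 1 = 0b011 */
	for_each_port_masked(p, (1 << PORT_C) - 1)
		;	/* visits PORT_A, then PORT_B */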
+
+static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+			    enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	enum port p;
+
+	if (!info->alternate_aux_channel)
+		return;
+
+	for_each_port_masked(p, (1 << port) - 1) {
+		struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+		if (info->alternate_aux_channel != i->alternate_aux_channel)
+			continue;
+
+		DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
+			      "disabling port %c DP support\n",
+			      port_name(p), i->alternate_aux_channel,
+			      port_name(port), port_name(p));
+
+		/*
+		 * If we have multiple ports supposedly sharing the
+		 * aux channel, then DP couldn't exist on the shared
+		 * port. Otherwise they share the same aux channel
+		 * and the system couldn't communicate with them separately.
+		 *
+		 * Due to parsing the ports in alphabetical order,
+		 * a higher port will always clobber a lower one.
+		 */
+		i->supports_dp = false;
+		i->alternate_aux_channel = 0;
+	}
+}
+
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 			   const struct bdb_header *bdb)
 {
@@ -1105,54 +1180,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		if (port == PORT_E) {
-			info->alternate_ddc_pin = ddc_pin;
-			/* if DDIE share ddc pin with other port, then
-			 * dvi/hdmi couldn't exist on the shared port.
-			 * Otherwise they share the same ddc bin and system
-			 * couldn't communicate with them seperately. */
-			if (ddc_pin == DDC_PIN_B) {
-				dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
-				dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
-			} else if (ddc_pin == DDC_PIN_C) {
-				dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
-				dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
-			} else if (ddc_pin == DDC_PIN_D) {
-				dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
-				dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
-			}
-		} else if (ddc_pin == DDC_PIN_B && port != PORT_B)
-			DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
-		else if (ddc_pin == DDC_PIN_C && port != PORT_C)
-			DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
-		else if (ddc_pin == DDC_PIN_D && port != PORT_D)
-			DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+		info->alternate_ddc_pin = ddc_pin;
+
+		sanitize_ddc_pin(dev_priv, port);
 	}
 
 	if (is_dp) {
-		if (port == PORT_E) {
-			info->alternate_aux_channel = aux_channel;
-			/* if DDIE share aux channel with other port, then
-			 * DP couldn't exist on the shared port. Otherwise
-			 * they share the same aux channel and system
-			 * couldn't communicate with them seperately. */
-			if (aux_channel == DP_AUX_A)
-				dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
-			else if (aux_channel == DP_AUX_B)
-				dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
-			else if (aux_channel == DP_AUX_C)
-				dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
-			else if (aux_channel == DP_AUX_D)
-				dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
-		}
-		else if (aux_channel == DP_AUX_A && port != PORT_A)
-			DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
-		else if (aux_channel == DP_AUX_B && port != PORT_B)
-			DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
-		else if (aux_channel == DP_AUX_C && port != PORT_C)
-			DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
-		else if (aux_channel == DP_AUX_D && port != PORT_D)
-			DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+		info->alternate_aux_channel = aux_channel;
+
+		sanitize_aux_ch(dev_priv, port);
 	}
 
 	if (bdb->version >= 158) {
@@ -1759,3 +1795,52 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
 
 	return false;
 }
+
+/**
+ * intel_bios_is_lspcon_present - check if LSPCON is present on @port
+ * @dev_priv:	i915 device instance
+ * @port:	port to check
+ *
+ * Return: true if LSPCON is present on this port, false otherwise.
+ */
+bool
+intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+				enum port port)
+{
+	int i;
+
+	if (!HAS_LSPCON(dev_priv))
+		return false;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		if (!dev_priv->vbt.child_dev[i].common.lspcon)
+			continue;
+
+		switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+		case DVO_PORT_DPA:
+		case DVO_PORT_HDMIA:
+			if (port == PORT_A)
+				return true;
+			break;
+		case DVO_PORT_DPB:
+		case DVO_PORT_HDMIB:
+			if (port == PORT_B)
+				return true;
+			break;
+		case DVO_PORT_DPC:
+		case DVO_PORT_HDMIC:
+			if (port == PORT_C)
+				return true;
+			break;
+		case DVO_PORT_DPD:
+		case DVO_PORT_HDMID:
+			if (port == PORT_D)
+				return true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 495611b..c9c46a5 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -83,16 +83,18 @@ static void irq_enable(struct intel_engine_cs *engine)
 	 */
 	engine->breadcrumbs.irq_posted = true;
 
-	spin_lock_irq(&engine->i915->irq_lock);
+	/* Caller disables interrupts */
+	spin_lock(&engine->i915->irq_lock);
 	engine->irq_enable(engine);
-	spin_unlock_irq(&engine->i915->irq_lock);
+	spin_unlock(&engine->i915->irq_lock);
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
 {
-	spin_lock_irq(&engine->i915->irq_lock);
+	/* Caller disables interrupts */
+	spin_lock(&engine->i915->irq_lock);
 	engine->irq_disable(engine);
-	spin_unlock_irq(&engine->i915->irq_lock);
+	spin_unlock(&engine->i915->irq_lock);
 
 	engine->breadcrumbs.irq_posted = false;
 }
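
After this change b->lock (converted to spin_lock_irq() in the hunks
below) is the lock that blocks interrupts, and i915->irq_lock nests
inside it with interrupts already off. A sketch of the resulting
nesting, illustrative only:

	static void example_irq_enable_under_b_lock(struct intel_engine_cs *engine)
	{
		struct intel_breadcrumbs *b = &engine->breadcrumbs;

		spin_lock_irq(&b->lock);	/* outer lock blocks irqs */
		spin_lock(&engine->i915->irq_lock); /* nested, irqs off */
		engine->irq_enable(engine);
		spin_unlock(&engine->i915->irq_lock);
		spin_unlock_irq(&b->lock);
	}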
@@ -293,9 +295,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 	bool first;
 
-	spin_lock(&b->lock);
+	spin_lock_irq(&b->lock);
 	first = __intel_engine_add_wait(engine, wait);
-	spin_unlock(&b->lock);
+	spin_unlock_irq(&b->lock);
 
 	return first;
 }
@@ -326,7 +328,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
 	if (RB_EMPTY_NODE(&wait->node))
 		return;
 
-	spin_lock(&b->lock);
+	spin_lock_irq(&b->lock);
 
 	if (RB_EMPTY_NODE(&wait->node))
 		goto out_unlock;
@@ -400,7 +402,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
 	GEM_BUG_ON(rb_first(&b->waiters) !=
 		   (b->first_wait ? &b->first_wait->node : NULL));
 	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
-	spin_unlock(&b->lock);
+	spin_unlock_irq(&b->lock);
 }
 
 static bool signal_complete(struct drm_i915_gem_request *request)
@@ -464,7 +466,7 @@ static int intel_breadcrumbs_signaler(void *arg)
 						 &request->signaling.wait);
 
 			local_bh_disable();
-			fence_signal(&request->fence);
+			dma_fence_signal(&request->fence);
 			local_bh_enable(); /* kick start the tasklets */
 
 			/* Find the next oldest signal. Note that as we have
@@ -473,14 +475,14 @@ static int intel_breadcrumbs_signaler(void *arg)
 			 * we just completed - so double check we are still
 			 * the oldest before picking the next one.
 			 */
-			spin_lock(&b->lock);
+			spin_lock_irq(&b->lock);
 			if (request == b->first_signal) {
 				struct rb_node *rb =
 					rb_next(&request->signaling.node);
 				b->first_signal = rb ? to_signaler(rb) : NULL;
 			}
 			rb_erase(&request->signaling.node, &b->signals);
-			spin_unlock(&b->lock);
+			spin_unlock_irq(&b->lock);
 
 			i915_gem_request_put(request);
 		} else {
@@ -502,11 +504,20 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	struct rb_node *parent, **p;
 	bool first, wakeup;
 
-	/* locked by fence_enable_sw_signaling() */
+	/* Note that we may be called from an interrupt handler on another
+	 * device (e.g. nouveau signaling a fence completion causing us
+	 * to submit a request, and so enable signaling). As such,
+	 * we need to make sure that all other users of b->lock protect
+	 * against interrupts, i.e. use spin_lock_irqsave.
+	 */
+
+	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
 	assert_spin_locked(&request->lock);
+	if (!request->global_seqno)
+		return;
 
 	request->signaling.wait.tsk = b->signaler;
-	request->signaling.wait.seqno = request->fence.seqno;
+	request->signaling.wait.seqno = request->global_seqno;
 	i915_gem_request_get(request);
 
 	spin_lock(&b->lock);
@@ -530,8 +541,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	p = &b->signals.rb_node;
 	while (*p) {
 		parent = *p;
-		if (i915_seqno_passed(request->fence.seqno,
-				      to_signaler(parent)->fence.seqno)) {
+		if (i915_seqno_passed(request->global_seqno,
+				      to_signaler(parent)->global_seqno)) {
 			p = &parent->rb_right;
 			first = false;
 		} else {
@@ -592,7 +603,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
 	cancel_fake_irq(engine);
-	spin_lock(&b->lock);
+	spin_lock_irq(&b->lock);
 
 	__intel_breadcrumbs_disable_irq(b);
 	if (intel_engine_has_waiter(engine)) {
@@ -605,7 +616,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 		irq_disable(engine);
 	}
 
-	spin_unlock(&b->lock);
+	spin_unlock_irq(&b->lock);
 }
 
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
@@ -618,33 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 	cancel_fake_irq(engine);
 }
 
-unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	unsigned int mask = 0;
 
-	/* To avoid the task_struct disappearing beneath us as we wake up
-	 * the process, we must first inspect the task_struct->state under the
-	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
-	 * rcu_read_lock().
-	 */
-	for_each_engine(engine, i915)
-		if (unlikely(intel_engine_wakeup(engine)))
-			mask |= intel_engine_flag(engine);
+	for_each_engine(engine, i915, id) {
+		struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
-	return mask;
-}
+		spin_lock_irq(&b->lock);
 
-unsigned int intel_kick_signalers(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	unsigned int mask = 0;
-
-	for_each_engine(engine, i915) {
-		if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
-			wake_up_process(engine->breadcrumbs.signaler);
+		if (b->first_wait) {
+			wake_up_process(b->first_wait->tsk);
 			mask |= intel_engine_flag(engine);
 		}
+
+		if (b->first_signal) {
+			wake_up_process(b->signaler);
+			mask |= intel_engine_flag(engine);
+		}
+
+		spin_unlock_irq(&b->lock);
 	}
 
 	return mask;
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 95a7277..4451088 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -273,7 +273,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 	enum pipe pipe = intel_crtc->pipe;
 	int i;
 
-	if (HAS_GMCH_DISPLAY(dev)) {
+	if (HAS_GMCH_DISPLAY(dev_priv)) {
 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
 			assert_dsi_pll_enabled(dev_priv);
 		else
@@ -288,7 +288,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 				(drm_color_lut_extract(lut[i].green, 8) << 8) |
 				drm_color_lut_extract(lut[i].blue, 8);
 
-			if (HAS_GMCH_DISPLAY(dev))
+			if (HAS_GMCH_DISPLAY(dev_priv))
 				I915_WRITE(PALETTE(pipe, i), word);
 			else
 				I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -297,7 +297,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 		for (i = 0; i < 256; i++) {
 			uint32_t word = (i << 16) | (i << 8) | i;
 
-			if (HAS_GMCH_DISPLAY(dev))
+			if (HAS_GMCH_DISPLAY(dev_priv))
 				I915_WRITE(PALETTE(pipe, i), word);
 			else
 				I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -326,7 +326,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
 	 * Workaround : Do not read or write the pipe palette/gamma data while
 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
 	 */
-	if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
+	if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
 	    (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
 		hsw_disable_ips(intel_crtc);
 		reenable_ips = true;
@@ -534,14 +534,14 @@ void intel_color_init(struct drm_crtc *crtc)
 
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
 		dev_priv->display.load_luts = cherryview_load_luts;
-	} else if (IS_HASWELL(dev)) {
+	} else if (IS_HASWELL(dev_priv)) {
 		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
 		dev_priv->display.load_luts = haswell_load_luts;
-	} else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
-		   IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
+	} else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
+		   IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
 		dev_priv->display.load_luts = broadwell_load_luts;
 	} else {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dfbcf16..30eb95b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -84,7 +84,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 	if (!(tmp & ADPA_DAC_ENABLE))
 		goto out;
 
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_CPT(dev_priv))
 		*pipe = PORT_TO_PIPE_CPT(tmp);
 	else
 		*pipe = PORT_TO_PIPE(tmp);
@@ -165,16 +165,16 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
 	/* For CPT allow 3 pipe config, for others just use A or B */
-	if (HAS_PCH_LPT(dev))
+	if (HAS_PCH_LPT(dev_priv))
 		; /* Those bits don't exist here */
-	else if (HAS_PCH_CPT(dev))
+	else if (HAS_PCH_CPT(dev_priv))
 		adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
 	else if (crtc->pipe == 0)
 		adpa |= ADPA_PIPE_A_SELECT;
 	else
 		adpa |= ADPA_PIPE_B_SELECT;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (!HAS_PCH_SPLIT(dev_priv))
 		I915_WRITE(BCLRPAT(crtc->pipe), 0);
 
 	switch (mode) {
@@ -241,7 +241,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
-	int max_dotclk = to_i915(dev)->max_dotclk_freq;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int max_dotclk = dev_priv->max_dotclk_freq;
 	int max_clock;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -250,15 +251,15 @@ intel_crt_mode_valid(struct drm_connector *connector,
 	if (mode->clock < 25000)
 		return MODE_CLOCK_LOW;
 
-	if (HAS_PCH_LPT(dev))
+	if (HAS_PCH_LPT(dev_priv))
 		max_clock = 180000;
-	else if (IS_VALLEYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		/*
 		 * 270 MHz due to current DPLL limits,
 		 * DAC limit supposedly 355 MHz.
 		 */
 		max_clock = 270000;
-	else if (IS_GEN3(dev) || IS_GEN4(dev))
+	else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
 		max_clock = 400000;
 	else
 		max_clock = 350000;
@@ -269,7 +270,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
 		return MODE_CLOCK_HIGH;
 
 	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
-	if (HAS_PCH_LPT(dev) &&
+	if (HAS_PCH_LPT(dev_priv) &&
 	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
 		return MODE_CLOCK_HIGH;
 
@@ -280,13 +281,13 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		pipe_config->has_pch_encoder = true;
 
 	/* LPT FDI RX only supports 8bpc. */
-	if (HAS_PCH_LPT(dev)) {
+	if (HAS_PCH_LPT(dev_priv)) {
 		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
 			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
 			return false;
@@ -296,7 +297,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	}
 
 	/* FDI must always be 2.7 GHz */
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		pipe_config->port_clock = 135000 * 2;
 
 	return true;
@@ -312,7 +313,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 
 	/* The first time through, trigger an explicit detection cycle */
 	if (crt->force_hotplug_required) {
-		bool turn_off_dac = HAS_PCH_SPLIT(dev);
+		bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
 		u32 save_adpa;
 
 		crt->force_hotplug_required = 0;
@@ -419,10 +420,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	bool ret = false;
 	int i, tries = 0;
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		return intel_ironlake_crt_detect_hotplug(connector);
 
-	if (IS_VALLEYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv))
 		return valleyview_crt_detect_hotplug(connector);
 
 	/*
@@ -430,7 +431,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	 * to get a reliable result.
 	 */
 
-	if (IS_G4X(dev) && !IS_GM45(dev))
+	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
 		tries = 2;
 	else
 		tries = 1;
@@ -566,13 +567,13 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
 	/* Set the border color to purple. */
 	I915_WRITE(bclrpat_reg, 0x500050);
 
-	if (!IS_GEN2(dev)) {
+	if (!IS_GEN2(dev_priv)) {
 		uint32_t pipeconf = I915_READ(pipeconf_reg);
 		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
 		POSTING_READ(pipeconf_reg);
 		/* Wait for next Vblank to substitute
 		 * border color for Color info */
-		intel_wait_for_vblank(dev, pipe);
+		intel_wait_for_vblank(dev_priv, pipe);
 		st00 = I915_READ8(_VGA_MSR_WRITE);
 		status = ((st00 & (1 << 4)) != 0) ?
 			connector_status_connected :
@@ -643,6 +644,32 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
 	return status;
 }
 
+static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id)
+{
+	DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_spurious_crt_detect[] = {
+	{
+		.callback = intel_spurious_crt_detect_dmi_callback,
+		.ident = "ACER ZGB",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+		},
+	},
+	{
+		.callback = intel_spurious_crt_detect_dmi_callback,
+		.ident = "Intel DZ77BH-55K",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+			DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"),
+		},
+	},
+	{ }
+};
+
 static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
@@ -659,6 +686,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 		      connector->base.id, connector->name,
 		      force);
 
+	/* Skip machines without VGA that falsely report hotplug events */
+	if (dmi_check_system(intel_spurious_crt_detect))
+		return connector_status_disconnected;
+
 	power_domain = intel_display_port_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
@@ -740,7 +771,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
 
 	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
 	ret = intel_crt_ddc_get_modes(connector, i2c);
-	if (ret || !IS_G4X(dev))
+	if (ret || !IS_G4X(dev_priv))
 		goto out;
 
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -808,32 +839,6 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 	.destroy = intel_encoder_destroy,
 };
 
-static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
-{
-	DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
-	return 1;
-}
-
-static const struct dmi_system_id intel_no_crt[] = {
-	{
-		.callback = intel_no_crt_dmi_callback,
-		.ident = "ACER ZGB",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
-		},
-	},
-	{
-		.callback = intel_no_crt_dmi_callback,
-		.ident = "DELL XPS 8700",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
-		},
-	},
-	{ }
-};
-
 void intel_crt_init(struct drm_device *dev)
 {
 	struct drm_connector *connector;
@@ -843,13 +848,9 @@ void intel_crt_init(struct drm_device *dev)
 	i915_reg_t adpa_reg;
 	u32 adpa;
 
-	/* Skip machines without VGA that falsely report hotplug events */
-	if (dmi_check_system(intel_no_crt))
-		return;
-
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		adpa_reg = PCH_ADPA;
-	else if (IS_VALLEYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		adpa_reg = VLV_ADPA;
 	else
 		adpa_reg = ADPA;
@@ -893,12 +894,12 @@ void intel_crt_init(struct drm_device *dev)
 
 	crt->base.type = INTEL_OUTPUT_ANALOG;
 	crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
-	if (IS_I830(dev))
+	if (IS_I830(dev_priv))
 		crt->base.crtc_mask = (1 << 0);
 	else
 		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		connector->interlace_allowed = 0;
 	else
 		connector->interlace_allowed = 1;
@@ -907,20 +908,23 @@ void intel_crt_init(struct drm_device *dev)
 	crt->adpa_reg = adpa_reg;
 
 	crt->base.compute_config = intel_crt_compute_config;
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		crt->base.disable = pch_disable_crt;
 		crt->base.post_disable = pch_post_disable_crt;
 	} else {
 		crt->base.disable = intel_disable_crt;
 	}
 	crt->base.enable = intel_enable_crt;
-	if (I915_HAS_HOTPLUG(dev))
+	if (I915_HAS_HOTPLUG(dev) &&
+	    !dmi_check_system(intel_spurious_crt_detect))
 		crt->base.hpd_pin = HPD_CRT;
-	if (HAS_DDI(dev)) {
+	if (HAS_DDI(dev_priv)) {
+		crt->base.port = PORT_E;
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
 		crt->base.post_disable = hsw_post_disable_crt;
 	} else {
+		crt->base.port = PORT_NONE;
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
 	}
@@ -941,7 +945,7 @@ void intel_crt_init(struct drm_device *dev)
 	 * polarity and link reversal bits or not, instead of relying on the
 	 * BIOS.
 	 */
-	if (HAS_PCH_LPT(dev)) {
+	if (HAS_PCH_LPT(dev_priv)) {
 		u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
 				 FDI_RX_LINK_REVERSAL_OVERRIDE;
 
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 1ea0e1f..d7a04bc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -168,12 +168,6 @@ struct stepping_info {
 	char substepping;
 };
 
-static const struct stepping_info kbl_stepping_info[] = {
-	{'A', '0'}, {'B', '0'}, {'C', '0'},
-	{'D', '0'}, {'E', '0'}, {'F', '0'},
-	{'G', '0'}, {'H', '0'}, {'I', '0'},
-};
-
 static const struct stepping_info skl_stepping_info[] = {
 	{'A', '0'}, {'B', '0'}, {'C', '0'},
 	{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -194,10 +188,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
 	const struct stepping_info *si;
 	unsigned int size;
 
-	if (IS_KABYLAKE(dev_priv)) {
-		size = ARRAY_SIZE(kbl_stepping_info);
-		si = kbl_stepping_info;
-	} else if (IS_SKYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv)) {
 		size = ARRAY_SIZE(skl_stepping_info);
 		si = skl_stepping_info;
 	} else if (IS_BROXTON(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 15d47c8..0ad4e16 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -167,8 +167,47 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x80005012, 0x000000C0, 0x3 },
 };
 
+/* Kabylake H and S */
+static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
+	{ 0x00002016, 0x000000A0, 0x0 },
+	{ 0x00005012, 0x0000009B, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x80009010, 0x000000C0, 0x1 },
+	{ 0x00002016, 0x0000009B, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x80007011, 0x000000C0, 0x1 },
+	{ 0x00002016, 0x00000097, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Kabylake U */
+static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
+	{ 0x0000201B, 0x000000A1, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x3 },
+	{ 0x80009010, 0x000000C0, 0x3 },
+	{ 0x0000201B, 0x0000009D, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x3 },
+	{ 0x80007011, 0x000000C0, 0x3 },
+	{ 0x00002016, 0x0000004F, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake Y */
+static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
+	{ 0x00001017, 0x000000A1, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x3 },
+	{ 0x8000800F, 0x000000C0, 0x3 },
+	{ 0x00001017, 0x0000009D, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x3 },
+	{ 0x80007011, 0x000000C0, 0x3 },
+	{ 0x00001017, 0x0000004C, 0x0 },
+	{ 0x80005012, 0x000000C0, 0x3 },
+};
+
 /*
- * Skylake H and S
+ * Skylake/Kabylake H and S
  * eDP 1.4 low vswing translation parameters
  */
 static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
@@ -185,7 +224,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
 };
 
 /*
- * Skylake U
+ * Skylake/Kabylake U
  * eDP 1.4 low vswing translation parameters
  */
 static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
@@ -202,7 +241,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
 };
 
 /*
- * Skylake Y
+ * Skylake/Kabylake Y
  * eDP 1.4 low vswing translation parameters
  */
 static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
@@ -218,7 +257,7 @@ static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
 	{ 0x00000018, 0x0000008A, 0x0 },
 };
 
-/* Skylake U, H and S */
+/* Skylake/Kabylake U, H and S */
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
 	{ 0x00000018, 0x000000AC, 0x0 },
 	{ 0x00005012, 0x0000009D, 0x0 },
@@ -233,7 +272,7 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
 	{ 0x80000018, 0x000000C0, 0x1 },
 };
 
-/* Skylake Y */
+/* Skylake/Kabylake Y */
 static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
 	{ 0x00000018, 0x000000A1, 0x0 },
 	{ 0x00005012, 0x000000DF, 0x0 },
@@ -334,10 +373,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct ddi_buf_trans *
 skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+	if (IS_SKL_ULX(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
 		return skl_y_ddi_translations_dp;
-	} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
+	} else if (IS_SKL_ULT(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
 		return skl_u_ddi_translations_dp;
 	} else {
@@ -347,6 +386,21 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct ddi_buf_trans *
+kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+	if (IS_KBL_ULX(dev_priv)) {
+		*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
+		return kbl_y_ddi_translations_dp;
+	} else if (IS_KBL_ULT(dev_priv)) {
+		*n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
+		return kbl_u_ddi_translations_dp;
+	} else {
+		*n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
+		return kbl_ddi_translations_dp;
+	}
+}
+
+static const struct ddi_buf_trans *
 skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
 	if (dev_priv->vbt.edp.low_vswing) {
@@ -362,7 +416,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 		}
 	}
 
-	return skl_get_buf_trans_dp(dev_priv, n_entries);
+	if (IS_KABYLAKE(dev_priv))
+		return kbl_get_buf_trans_dp(dev_priv, n_entries);
+	else
+		return skl_get_buf_trans_dp(dev_priv, n_entries);
 }
 
 static const struct ddi_buf_trans *
@@ -430,21 +487,18 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
 	if (IS_BROXTON(dev_priv))
 		return;
 
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_KABYLAKE(dev_priv)) {
+		ddi_translations_fdi = NULL;
+		ddi_translations_dp =
+				kbl_get_buf_trans_dp(dev_priv, &n_dp_entries);
+		ddi_translations_edp =
+				skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
+	} else if (IS_SKYLAKE(dev_priv)) {
 		ddi_translations_fdi = NULL;
 		ddi_translations_dp =
 				skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
 		ddi_translations_edp =
 				skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
-
-		/* If we're boosting the current, set bit 31 of trans1 */
-		if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
-			iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
-		if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
-			    port != PORT_A && port != PORT_E &&
-			    n_edp_entries > 9))
-			n_edp_entries = 9;
 	} else if (IS_BROADWELL(dev_priv)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
@@ -464,6 +518,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 	}
 
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		/* If we're boosting the current, set bit 31 of trans1 */
+		if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+			iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+
+		if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+			    port != PORT_A && port != PORT_E &&
+			    n_edp_entries > 9))
+			n_edp_entries = 9;
+	}
+
 	switch (encoder->type) {
 	case INTEL_OUTPUT_EDP:
 		ddi_translations = ddi_translations_edp;
@@ -1020,13 +1085,13 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
 void intel_ddi_clock_get(struct intel_encoder *encoder,
 			 struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (INTEL_INFO(dev)->gen <= 8)
+	if (INTEL_GEN(dev_priv) <= 8)
 		hsw_ddi_clock_get(encoder, pipe_config);
-	else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		skl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		bxt_ddi_clock_get(encoder, pipe_config);
 }
 
@@ -1081,14 +1146,14 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
 bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
 			  struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 	struct intel_encoder *intel_encoder =
 		intel_ddi_get_crtc_new_encoder(crtc_state);
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		return skl_ddi_pll_select(intel_crtc, crtc_state,
 					  intel_encoder);
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		return bxt_ddi_pll_select(intel_crtc, crtc_state,
 					  intel_encoder);
 	else
@@ -1189,7 +1254,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 			 * eDP when not using the panel fitter, and when not
 			 * using motion blur mitigation (which we don't
 			 * support). */
-			if (IS_HASWELL(dev) &&
+			if (IS_HASWELL(dev_priv) &&
 			    (intel_crtc->config->pch_pfit.enabled ||
 			     intel_crtc->config->pch_pfit.force_thru))
 				temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
@@ -1434,7 +1499,12 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
 		if (dp_iboost) {
 			iboost = dp_iboost;
 		} else {
-			ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
+			if (IS_KABYLAKE(dev_priv))
+				ddi_translations = kbl_get_buf_trans_dp(dev_priv,
+									&n_entries);
+			else
+				ddi_translations = skl_get_buf_trans_dp(dev_priv,
+									&n_entries);
 			iboost = ddi_translations[level].i_boost;
 		}
 	} else if (type == INTEL_OUTPUT_EDP) {
@@ -1477,7 +1547,6 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
 {
 	const struct bxt_ddi_buf_trans *ddi_translations;
 	u32 n_entries, i;
-	uint32_t val;
 
 	if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
 		n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
@@ -1506,38 +1575,11 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
 		}
 	}
 
-	/*
-	 * While we write to the group register to program all lanes at once we
-	 * can read only lane registers and we pick lanes 0/1 for that.
-	 */
-	val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
-	val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
-	I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
-
-	val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
-	val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
-	val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
-	       ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
-	I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
-
-	val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
-	val &= ~SCALE_DCOMP_METHOD;
-	if (ddi_translations[level].enable)
-		val |= SCALE_DCOMP_METHOD;
-
-	if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
-		DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
-
-	I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
-
-	val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
-	val &= ~DE_EMPHASIS;
-	val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
-	I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
-
-	val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
-	val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
-	I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+	bxt_ddi_phy_set_signal_level(dev_priv, port,
+				     ddi_translations[level].margin,
+				     ddi_translations[level].scale,
+				     ddi_translations[level].enable,
+				     ddi_translations[level].deemphasis);
 }
 
 static uint32_t translate_signal_level(int signal_levels)
@@ -1742,7 +1784,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
 		intel_edp_panel_off(intel_dp);
 	}
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
 					DPLL_CTRL2_DDI_CLK_OFF(port)));
 	else if (INTEL_INFO(dev)->gen < 9)
@@ -1824,7 +1866,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
 
 	if (intel_crtc->config->has_audio) {
 		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-		intel_audio_codec_enable(intel_encoder);
+		intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
 	}
 }
 
@@ -1853,332 +1895,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
 	}
 }
 
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
-			    enum dpio_phy phy)
-{
-	enum port port;
-
-	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
-		return false;
-
-	if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
-	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
-		DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
-				 phy);
-
-		return false;
-	}
-
-	if (phy == DPIO_PHY1 &&
-	    !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
-		DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
-
-		return false;
-	}
-
-	if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
-		DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
-				 phy);
-
-		return false;
-	}
-
-	for_each_port_masked(port,
-			     phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
-						BIT(PORT_A)) {
-		u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
-		if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
-			DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
-					 "for port %c powered down "
-					 "(PHY_CTL %08x)\n",
-					 phy, port_name(port), tmp);
-
-			return false;
-		}
-	}
-
-	return true;
-}
-
-static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
-	u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
-
-	return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
-}
-
-static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
-				  enum dpio_phy phy)
-{
-	if (intel_wait_for_register(dev_priv,
-				    BXT_PORT_REF_DW3(phy),
-				    GRC_DONE, GRC_DONE,
-				    10))
-		DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
-}
-
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
-	u32 val;
-
-	if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
-		/* Still read out the GRC value for state verification */
-		if (phy == DPIO_PHY0)
-			dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
-
-		if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
-			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
-					 "won't reprogram it\n", phy);
-
-			return;
-		}
-
-		DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
-				 "force reprogramming it\n", phy);
-	}
-
-	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
-	val |= GT_DISPLAY_POWER_ON(phy);
-	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-
-	/*
-	 * The PHY registers start out inaccessible and respond to reads with
-	 * all 1s.  Eventually they become accessible as they power up, then
-	 * the reserved bit will give the default 0.  Poll on the reserved bit
-	 * becoming 0 to find when the PHY is accessible.
-	 * HW team confirmed that the time to reach phypowergood status is
-	 * anywhere between 50 us and 100us.
-	 */
-	if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
-		(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
-		DRM_ERROR("timeout during PHY%d power on\n", phy);
-	}
-
-	/* Program PLL Rcomp code offset */
-	val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
-	val &= ~IREF0RC_OFFSET_MASK;
-	val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
-	I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
-
-	val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
-	val &= ~IREF1RC_OFFSET_MASK;
-	val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
-	I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
-
-	/* Program power gating */
-	val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
-	val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
-		SUS_CLK_CONFIG;
-	I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
-
-	if (phy == DPIO_PHY0) {
-		val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
-		val |= DW6_OLDO_DYN_PWR_DOWN_EN;
-		I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
-	}
-
-	val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
-	val &= ~OCL2_LDOFUSE_PWR_DIS;
-	/*
-	 * On PHY1 disable power on the second channel, since no port is
-	 * connected there. On PHY0 both channels have a port, so leave it
-	 * enabled.
-	 * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
-	 * power down the second channel on PHY0 as well.
-	 *
-	 * FIXME: Clarify programming of the following, the register is
-	 * read-only with bit 6 fixed at 0 at least in stepping A.
-	 */
-	if (phy == DPIO_PHY1)
-		val |= OCL2_LDOFUSE_PWR_DIS;
-	I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
-
-	if (phy == DPIO_PHY0) {
-		uint32_t grc_code;
-		/*
-		 * PHY0 isn't connected to an RCOMP resistor so copy over
-		 * the corresponding calibrated value from PHY1, and disable
-		 * the automatic calibration on PHY0.
-		 */
-		val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
-		grc_code = val << GRC_CODE_FAST_SHIFT |
-			   val << GRC_CODE_SLOW_SHIFT |
-			   val;
-		I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
-
-		val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
-		val |= GRC_DIS | GRC_RDY_OVRD;
-		I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
-	}
-
-	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
-	val |= COMMON_RESET_DIS;
-	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
-	if (phy == DPIO_PHY1)
-		bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
-}
-
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
-	uint32_t val;
-
-	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
-	val &= ~COMMON_RESET_DIS;
-	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
-	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
-	val &= ~GT_DISPLAY_POWER_ON(phy);
-	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-}
-
-static bool __printf(6, 7)
-__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
-		       i915_reg_t reg, u32 mask, u32 expected,
-		       const char *reg_fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-	u32 val;
-
-	val = I915_READ(reg);
-	if ((val & mask) == expected)
-		return true;
-
-	va_start(args, reg_fmt);
-	vaf.fmt = reg_fmt;
-	vaf.va = &args;
-
-	DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
-			 "current %08x, expected %08x (mask %08x)\n",
-			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
-			 mask);
-
-	va_end(args);
-
-	return false;
-}
-
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
-			      enum dpio_phy phy)
-{
-	uint32_t mask;
-	bool ok;
-
-#define _CHK(reg, mask, exp, fmt, ...)					\
-	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,	\
-			       ## __VA_ARGS__)
-
-	if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
-		return false;
-
-	ok = true;
-
-	/* PLL Rcomp code offset */
-	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
-		    IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
-		    "BXT_PORT_CL1CM_DW9(%d)", phy);
-	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
-		    IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
-		    "BXT_PORT_CL1CM_DW10(%d)", phy);
-
-	/* Power gating */
-	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
-	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
-		    "BXT_PORT_CL1CM_DW28(%d)", phy);
-
-	if (phy == DPIO_PHY0)
-		ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
-			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
-			   "BXT_PORT_CL2CM_DW6_BC");
-
-	/*
-	 * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
-	 * at least on stepping A this bit is read-only and fixed at 0.
-	 */
-
-	if (phy == DPIO_PHY0) {
-		u32 grc_code = dev_priv->bxt_phy_grc;
-
-		grc_code = grc_code << GRC_CODE_FAST_SHIFT |
-			   grc_code << GRC_CODE_SLOW_SHIFT |
-			   grc_code;
-		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
-		       GRC_CODE_NOM_MASK;
-		ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
-			    "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
-
-		mask = GRC_DIS | GRC_RDY_OVRD;
-		ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
-			    "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
-	}
-
-	return ok;
-#undef _CHK
-}
-
-static uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
-{
-	switch (pipe_config->lane_count) {
-	case 1:
-		return 0;
-	case 2:
-		return BIT(2) | BIT(0);
-	case 4:
-		return BIT(3) | BIT(2) | BIT(0);
-	default:
-		MISSING_CASE(pipe_config->lane_count);
-
-		return 0;
-	}
-}
-
 static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
 				   struct intel_crtc_state *pipe_config,
 				   struct drm_connector_state *conn_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
-	enum port port = dport->port;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	int lane;
+	uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
 
-	for (lane = 0; lane < 4; lane++) {
-		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
-		/*
-		 * Note that on CHV this flag is called UPAR, but has
-		 * the same function.
-		 */
-		val &= ~LATENCY_OPTIM;
-		if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
-			val |= LATENCY_OPTIM;
-
-		I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
-	}
-}
-
-static uint8_t
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
-{
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
-	enum port port = dport->port;
-	int lane;
-	uint8_t mask;
-
-	mask = 0;
-	for (lane = 0; lane < 4; lane++) {
-		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
-		if (val & LATENCY_OPTIM)
-			mask |= BIT(lane);
-	}
-
-	return mask;
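+	/* Apply the lane latency optimisation mask from compute_config */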
+	bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
 }
 
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2347,7 +2071,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 	if (IS_BROXTON(dev_priv) && ret)
 		pipe_config->lane_lat_optim_mask =
 			bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
-							     pipe_config);
+							     pipe_config->lane_count);
 
 	return ret;
 
@@ -2438,7 +2162,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	struct intel_digital_port *intel_dig_port;
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
-	bool init_hdmi, init_dp;
+	bool init_hdmi, init_dp, init_lspcon = false;
 	int max_lanes;
 
 	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
@@ -2470,6 +2194,19 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
 		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
 	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+
+	if (intel_bios_is_lspcon_present(dev_priv, port)) {
+		/*
+		 * An LSPCON device must be driven from a DP connector
+		 * with a special detection sequence, so make sure DP
+		 * is initialized before LSPCON.
+		 */
+		init_dp = true;
+		init_lspcon = true;
+		init_hdmi = false;
+		DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+	}
+
 	if (!init_dp && !init_hdmi) {
 		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
 			      port_name(port));
@@ -2509,7 +2246,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	 * configuration so that we use the proper lane count for our
 	 * calculations.
 	 */
-	if (IS_BROXTON(dev) && port == PORT_A) {
+	if (IS_BROXTON(dev_priv) && port == PORT_A) {
 		if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
 			DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
 			intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
@@ -2520,6 +2257,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	intel_dig_port->max_lanes = max_lanes;
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+	intel_encoder->port = port;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
 
@@ -2532,7 +2270,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 		 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
 		 * interrupts to check the external panel connection.
 		 */
-		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
+		if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
 			dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
 		else
 			dev_priv->hotplug.irq_port[port] = intel_dig_port;
@@ -2545,6 +2283,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 			goto err;
 	}
 
+	if (init_lspcon) {
+		if (lspcon_init(intel_dig_port))
+			/* TODO: handle the HDMI infoframe part */
+			DRM_DEBUG_KMS("LSPCON init success on port %c\n",
+				port_name(port));
+		else
+			/*
+			 * LSPCON init failed, but DP init succeeded, so
+			 * let's try to drive it as a DP++ port.
+			 */
+			DRM_ERROR("LSPCON init failed on port %c\n",
+				port_name(port));
+	}
+
 	return;
 
 err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 73b6858..185e3bb 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -28,20 +28,14 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
 {
 	const struct intel_device_info *info = &dev_priv->info;
 
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
-	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
-			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
 			 info->gen,
 			 dev_priv->drm.pdev->device,
-			 dev_priv->drm.pdev->revision,
-			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
+			 dev_priv->drm.pdev->revision);
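+	/* Print each device info flag on its own debug line */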
+#define PRINT_FLAG(name) \
+	DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
 #undef PRINT_FLAG
-#undef SEP_COMMA
 }
 
 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
@@ -192,7 +186,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
 	const int s_max = 3, ss_max = 3, eu_max = 8;
 	int s, ss;
-	u32 fuse2, eu_disable[s_max];
+	u32 fuse2, eu_disable[3]; /* s_max */
 
 	fuse2 = I915_READ(GEN8_FUSE2);
 	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
@@ -288,12 +282,13 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 		info->num_sprites[PIPE_A] = 2;
 		info->num_sprites[PIPE_B] = 2;
 		info->num_sprites[PIPE_C] = 1;
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 2;
-	else
+	} else if (INTEL_GEN(dev_priv) >= 5) {
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 1;
+	}
 
 	if (i915.disable_display) {
 		DRM_INFO("Display disabled (module parameter)\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fbcfed6..1547349 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,7 +37,6 @@
 #include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
 #include "intel_dsi.h"
 #include "i915_trace.h"
 #include <drm/drm_atomic.h>
@@ -116,8 +115,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
-	struct intel_crtc_state *crtc_state);
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+			     struct intel_crtc *crtc,
+			     struct intel_crtc_state *crtc_state);
 static void skylake_pfit_enable(struct intel_crtc *crtc);
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
@@ -600,7 +600,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
  * the given connectors.
  */
 
-static bool intel_PLL_is_valid(struct drm_device *dev,
+static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
 			       const struct intel_limit *limit,
 			       const struct dpll *clock)
 {
@@ -613,12 +613,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
 		INTELPllInvalid("m1 out of range\n");
 
-	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
-	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
+	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+	    !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
 		if (clock->m1 <= clock->m2)
 			INTELPllInvalid("m1 <= m2\n");
 
-	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+	    !IS_BROXTON(dev_priv)) {
 		if (clock->p < limit->p.min || limit->p.max < clock->p)
 			INTELPllInvalid("p out of range\n");
 		if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -698,7 +699,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
 					int this_err;
 
 					i9xx_calc_dpll_params(refclk, &clock);
-					if (!intel_PLL_is_valid(dev, limit,
+					if (!intel_PLL_is_valid(to_i915(dev),
+								limit,
 								&clock))
 						continue;
 					if (match_clock &&
@@ -753,7 +755,8 @@ pnv_find_best_dpll(const struct intel_limit *limit,
 					int this_err;
 
 					pnv_calc_dpll_params(refclk, &clock);
-					if (!intel_PLL_is_valid(dev, limit,
+					if (!intel_PLL_is_valid(to_i915(dev),
+								limit,
 								&clock))
 						continue;
 					if (match_clock &&
@@ -813,7 +816,8 @@ g4x_find_best_dpll(const struct intel_limit *limit,
 					int this_err;
 
 					i9xx_calc_dpll_params(refclk, &clock);
-					if (!intel_PLL_is_valid(dev, limit,
+					if (!intel_PLL_is_valid(to_i915(dev),
+								limit,
 								&clock))
 						continue;
 
@@ -845,7 +849,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
 	 * For CHV ignore the error and consider only the P value.
 	 * Prefer a bigger P value based on HW requirements.
 	 */
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(to_i915(dev))) {
 		*error_ppm = 0;
 
 		return calculated_clock->p > best_clock->p;
@@ -909,7 +913,8 @@ vlv_find_best_dpll(const struct intel_limit *limit,
 
 					vlv_calc_dpll_params(refclk, &clock);
 
-					if (!intel_PLL_is_valid(dev, limit,
+					if (!intel_PLL_is_valid(to_i915(dev),
+								limit,
 								&clock))
 						continue;
 
@@ -977,7 +982,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
 
 			chv_calc_dpll_params(refclk, &clock);
 
-			if (!intel_PLL_is_valid(dev, limit, &clock))
+			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
 				continue;
 
 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -1003,10 +1008,8 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 				  target_clock, refclk, NULL, best_clock);
 }
 
-bool intel_crtc_active(struct drm_crtc *crtc)
+bool intel_crtc_active(struct intel_crtc *crtc)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 	/* Be paranoid as we can arrive here with only partial
 	 * state retrieved from the hardware during setup.
 	 *
@@ -1020,17 +1023,16 @@ bool intel_crtc_active(struct drm_crtc *crtc)
 	 * crtc->state->active once we have proper CRTC states wired up
 	 * for atomic.
 	 */
-	return intel_crtc->active && crtc->primary->state->fb &&
-		intel_crtc->config->base.adjusted_mode.crtc_clock;
+	return crtc->active && crtc->base.primary->state->fb &&
+		crtc->config->base.adjusted_mode.crtc_clock;
 }
 
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 					     enum pipe pipe)
 {
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
-	return intel_crtc->config->cpu_transcoder;
+	return crtc->config->cpu_transcoder;
 }
 
 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
@@ -1040,7 +1042,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 	u32 line1, line2;
 	u32 line_mask;
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		line_mask = DSL_LINEMASK_GEN2;
 	else
 		line_mask = DSL_LINEMASK_GEN3;
@@ -1187,19 +1189,17 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 			onoff(state), onoff(cur_state));
 }
 
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
-			   enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	i915_reg_t pp_reg;
 	u32 val;
 	enum pipe panel_pipe = PIPE_A;
 	bool locked = true;
 
-	if (WARN_ON(HAS_DDI(dev)))
+	if (WARN_ON(HAS_DDI(dev_priv)))
 		return;
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		u32 port_sel;
 
 		pp_reg = PP_CONTROL(0);
@@ -1209,7 +1209,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
 			panel_pipe = PIPE_B;
 		/* XXX: else fix for eDP */
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		/* presumably write lock depends on pipe, not port select */
 		pp_reg = PP_CONTROL(pipe);
 		panel_pipe = pipe;
@@ -1232,10 +1232,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 static void assert_cursor(struct drm_i915_private *dev_priv,
 			  enum pipe pipe, bool state)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	bool cur_state;
 
-	if (IS_845G(dev) || IS_I865G(dev))
+	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
 		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 	else
 		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -1330,7 +1329,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
 			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
 			     sprite, pipe_name(pipe));
 		}
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		for_each_sprite(dev_priv, pipe, sprite) {
 			u32 val = I915_READ(SPCNTR(pipe, sprite));
 			I915_STATE_WARN(val & SP_ENABLE,
@@ -1619,11 +1618,11 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
 	assert_pipe_disabled(dev_priv, crtc->pipe);
 
 	/* PLL is protected by panel, make sure we can write it */
-	if (IS_MOBILE(dev) && !IS_I830(dev))
+	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
 		assert_panel_unlocked(dev_priv, crtc->pipe);
 
 	/* Enable DVO 2x clock on both PLLs if necessary */
-	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev) > 0) {
 		/*
 		 * It appears to be important that we don't enable this
 		 * for the current pipe before otherwise configuring the
@@ -1688,7 +1687,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
 	enum pipe pipe = crtc->pipe;
 
 	/* Disable DVO 2x clock on both PLLs if necessary */
-	if (IS_I830(dev) &&
+	if (IS_I830(dev_priv) &&
 	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
 	    !intel_num_dvo_pipes(dev)) {
 		I915_WRITE(DPLL(PIPE_B),
@@ -1786,9 +1785,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 					   enum pipe pipe)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+								pipe);
 	i915_reg_t reg;
 	uint32_t val, pipeconf_val;
 
@@ -1799,7 +1797,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	assert_fdi_tx_enabled(dev_priv, pipe);
 	assert_fdi_rx_enabled(dev_priv, pipe);
 
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		/* Workaround: Set the timing override bit before enabling the
 		 * pch transcoder. */
 		reg = TRANS_CHICKEN2(pipe);
@@ -1877,7 +1875,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
 					    enum pipe pipe)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	i915_reg_t reg;
 	uint32_t val;
 
@@ -1898,7 +1895,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
 				    50))
 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
 
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		/* Workaround: Clear the timing override chicken bit again. */
 		reg = TRANS_CHICKEN2(pipe);
 		val = I915_READ(reg);
@@ -1926,6 +1923,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
 }
 
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+	WARN_ON(!crtc->config->has_pch_encoder);
+
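+	/* LPT has a single PCH transcoder; elsewhere they map 1:1 to pipes */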
+	if (HAS_PCH_LPT(dev_priv))
+		return TRANSCODER_A;
+	else
+		return (enum transcoder) crtc->pipe;
+}
+
 /**
  * intel_enable_pipe - enable a pipe, asserting requirements
  * @crtc: crtc responsible for the pipe
@@ -1939,7 +1948,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = crtc->pipe;
 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
-	enum pipe pch_transcoder;
 	i915_reg_t reg;
 	u32 val;
 
@@ -1949,11 +1957,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
 	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
 
-	if (HAS_PCH_LPT(dev_priv))
-		pch_transcoder = TRANSCODER_A;
-	else
-		pch_transcoder = pipe;
-
 	/*
 	 * A pipe without a PLL won't actually be able to drive bits from
 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
@@ -1967,7 +1970,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
 	} else {
 		if (crtc->config->has_pch_encoder) {
 			/* if driving the PCH, we need FDI enabled */
-			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+			assert_fdi_rx_pll_enabled(dev_priv,
+						  (enum pipe) intel_crtc_pch_transcoder(crtc));
 			assert_fdi_tx_pll_enabled(dev_priv,
 						  (enum pipe) cpu_transcoder);
 		}
@@ -2139,7 +2143,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
 			const struct drm_framebuffer *fb,
 			unsigned int rotation)
 {
-	if (intel_rotation_90_or_270(rotation)) {
+	if (drm_rotation_90_or_270(rotation)) {
 		*view = i915_ggtt_view_rotated;
 		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
 	} else {
@@ -2260,7 +2264,7 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
 			  unsigned int rotation)
 {
-	if (intel_rotation_90_or_270(rotation))
+	if (drm_rotation_90_or_270(rotation))
 		return to_intel_framebuffer(fb)->rotated[plane].pitch;
 	else
 		return fb->pitches[plane];
@@ -2296,7 +2300,7 @@ void intel_add_fb_offsets(int *x, int *y,
 	const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
 	unsigned int rotation = state->base.rotation;
 
-	if (intel_rotation_90_or_270(rotation)) {
+	if (drm_rotation_90_or_270(rotation)) {
 		*x += intel_fb->rotated[plane].x;
 		*y += intel_fb->rotated[plane].y;
 	} else {
@@ -2360,7 +2364,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
 				fb->modifier[plane], cpp);
 
-		if (intel_rotation_90_or_270(rotation)) {
+		if (drm_rotation_90_or_270(rotation)) {
 			pitch_tiles = pitch / tile_height;
 			swap(tile_width, tile_height);
 		} else {
@@ -2416,7 +2420,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
 				fb_modifier, cpp);
 
-		if (intel_rotation_90_or_270(rotation)) {
+		if (drm_rotation_90_or_270(rotation)) {
 			pitch_tiles = pitch / tile_height;
 			swap(tile_width, tile_height);
 		} else {
@@ -2976,9 +2980,10 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
 	int ret;
 
 	/* Rotate src coordinates to match rotated GTT view */
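+	/* src coordinates are 16.16 fixed point, hence the << 16 below */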
-	if (intel_rotation_90_or_270(rotation))
+	if (drm_rotation_90_or_270(rotation))
 		drm_rect_rotate(&plane_state->base.src,
-				fb->width, fb->height, DRM_ROTATE_270);
+				fb->width << 16, fb->height << 16,
+				DRM_ROTATE_270);
 
 	/*
 	 * Handle the AUX surface first since
@@ -3009,7 +3014,6 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
 	u32 linear_offset;
 	u32 dspcntr;
@@ -3033,7 +3037,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 			   ((crtc_state->pipe_src_h - 1) << 16) |
 			   (crtc_state->pipe_src_w - 1));
 		I915_WRITE(DSPPOS(plane), 0);
-	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+	} else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
 		I915_WRITE(PRIMSIZE(plane),
 			   ((crtc_state->pipe_src_h - 1) << 16) |
 			   (crtc_state->pipe_src_w - 1));
@@ -3071,7 +3075,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	    fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		dspcntr |= DISPPLANE_TILED;
 
-	if (IS_G4X(dev))
+	if (IS_G4X(dev_priv))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
 	intel_add_fb_offsets(&x, &y, plane_state, 0);
@@ -3104,8 +3108,11 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 			   intel_crtc->dspaddr_offset);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPLINOFF(plane), linear_offset);
-	} else
-		I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
+	} else {
+		I915_WRITE(DSPADDR(plane),
+			   intel_fb_gtt_offset(fb, rotation) +
+			   intel_crtc->dspaddr_offset);
+	}
 	POSTING_READ(reg);
 }
 
@@ -3144,7 +3151,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 	dspcntr |= DISPLAY_PLANE_ENABLE;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
 
 	switch (fb->pixel_format) {
@@ -3173,7 +3180,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		dspcntr |= DISPPLANE_TILED;
 
-	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
 	intel_add_fb_offsets(&x, &y, plane_state, 0);
@@ -3184,7 +3191,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 	if (rotation == DRM_ROTATE_180) {
 		dspcntr |= DISPPLANE_ROTATE_180;
 
-		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+		if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
 			x += (crtc_state->pipe_src_w - 1);
 			y += (crtc_state->pipe_src_h - 1);
 		}
@@ -3201,7 +3208,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 	I915_WRITE(DSPSURF(plane),
 		   intel_fb_gtt_offset(fb, rotation) +
 		   intel_crtc->dspaddr_offset);
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
 	} else {
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -3276,7 +3283,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
 	 * The stride is either expressed as a multiple of 64 bytes chunks for
 	 * linear buffers or in number of tiles for tiled buffers.
 	 */
-	if (intel_rotation_90_or_270(rotation)) {
+	if (drm_rotation_90_or_270(rotation)) {
 		int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
 
 		stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
@@ -3378,6 +3385,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_framebuffer *fb = plane_state->base.fb;
 	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+	const struct skl_plane_wm *p_wm =
+		&crtc_state->wm.skl.optimal.planes[0];
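+	/* index 0 is the primary plane */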
 	int pipe = intel_crtc->pipe;
 	u32 plane_ctl;
 	unsigned int rotation = plane_state->base.rotation;
@@ -3414,7 +3423,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	intel_crtc->adjusted_y = src_y;
 
 	if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
-		skl_write_plane_wm(intel_crtc, wm, 0);
+		skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, 0);
 
 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
 	I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
@@ -3448,6 +3457,8 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+	const struct skl_plane_wm *p_wm = &cstate->wm.skl.optimal.planes[0];
 	int pipe = intel_crtc->pipe;
 
 	/*
@@ -3455,7 +3466,8 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
 	 * plane's visibility isn't actually changing neither is its watermarks.
 	 */
 	if (!crtc->primary->state->visible)
-		skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
+		skl_write_plane_wm(intel_crtc, p_wm,
+				   &dev_priv->wm.skl_results.ddb, 0);
 
 	I915_WRITE(PLANE_CTL(pipe, 0), 0);
 	I915_WRITE(PLANE_SURF(pipe, 0), 0);
@@ -3584,7 +3596,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	return;
 
 err:
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 }
 
 void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -3603,8 +3615,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 
 	dev_priv->modeset_restore_state = NULL;
 
-	dev_priv->modeset_restore_state = NULL;
-
 	/* reset doesn't touch the display */
 	if (!gpu_reset_clobbers_display(dev_priv)) {
 		if (!state) {
@@ -3646,6 +3656,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 		intel_hpd_init(dev_priv);
 	}
 
+	if (state)
+		drm_atomic_state_put(state);
 	drm_modeset_drop_locks(ctx);
 	drm_modeset_acquire_fini(ctx);
 	mutex_unlock(&dev->mode_config.mutex);
@@ -3714,7 +3726,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
 
 		if (pipe_config->pch_pfit.enabled)
 			skylake_pfit_enable(crtc);
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		if (pipe_config->pch_pfit.enabled)
 			ironlake_pfit_enable(crtc);
 		else if (old_crtc_state->pch_pfit.enabled)
@@ -3734,7 +3746,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 	/* enable normal train */
 	reg = FDI_TX_CTL(pipe);
 	temp = I915_READ(reg);
-	if (IS_IVYBRIDGE(dev)) {
+	if (IS_IVYBRIDGE(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
 	} else {
@@ -3745,7 +3757,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
 	} else {
@@ -3759,7 +3771,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 	udelay(1000);
 
 	/* IVB wants error correction enabled */
-	if (IS_IVYBRIDGE(dev))
+	if (IS_IVYBRIDGE(dev_priv))
 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
 			   FDI_FE_ERRC_ENABLE);
 }
@@ -3903,7 +3915,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
 	} else {
@@ -3947,7 +3959,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 	temp = I915_READ(reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 		/* SNB-B */
 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -3956,7 +3968,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
 	} else {
@@ -4210,7 +4222,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 	udelay(100);
 
 	/* Ironlake workaround, disable clock pointer after downing FDI */
-	if (HAS_PCH_IBX(dev))
+	if (HAS_PCH_IBX(dev_priv))
 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
 
 	/* still set train pattern 1 */
@@ -4222,7 +4234,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
 	} else {
@@ -4240,6 +4252,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 
 bool intel_has_pending_fb_unpin(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc;
 
 	/* Note that we don't need to be called with mode_config.lock here
@@ -4254,7 +4267,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 			continue;
 
 		if (crtc->flip_work)
-			intel_wait_for_vblank(dev, crtc->pipe);
+			intel_wait_for_vblank(dev_priv, crtc->pipe);
 
 		return true;
 	}
@@ -4545,7 +4558,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
 	assert_pch_transcoder_disabled(dev_priv, pipe);
 
-	if (IS_IVYBRIDGE(dev))
+	if (IS_IVYBRIDGE(dev_priv))
 		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
 
 	/* Write the TU size bits before fdi link training, so that error
@@ -4558,7 +4571,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
 	/* We need to program the right clock selection before writing the pixel
 	 * multiplier into the DPLL. */
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		u32 sel;
 
 		temp = I915_READ(PCH_DPLL_SEL);
@@ -4588,7 +4601,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 	intel_fdi_normal_train(crtc);
 
 	/* For PCH DP, enable TRANS_DP_CTL */
-	if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
+	if (HAS_PCH_CPT(dev_priv) &&
+	    intel_crtc_has_dp_encoder(intel_crtc->config)) {
 		const struct drm_display_mode *adjusted_mode =
 			&intel_crtc->config->base.adjusted_mode;
 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4667,7 +4681,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		to_intel_crtc(crtc_state->base.crtc);
 	int need_scaling;
 
-	need_scaling = intel_rotation_90_or_270(rotation) ?
+	need_scaling = drm_rotation_90_or_270(rotation) ?
 		(src_h != dst_w || src_w != dst_h):
 		(src_w != dst_w || src_h != dst_h);
 
@@ -4858,7 +4872,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
 		 */
-		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
 						 PF_PIPE_SEL_IVB(pipe));
 		else
@@ -4883,7 +4897,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
 	 */
 
 	assert_plane_enabled(dev_priv, crtc->plane);
-	if (IS_BROADWELL(dev)) {
+	if (IS_BROADWELL(dev_priv)) {
 		mutex_lock(&dev_priv->rps.hw_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
 		mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4915,7 +4929,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
 		return;
 
 	assert_plane_enabled(dev_priv, crtc->plane);
-	if (IS_BROADWELL(dev)) {
+	if (IS_BROADWELL(dev_priv)) {
 		mutex_lock(&dev_priv->rps.hw_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
 		mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4930,7 +4944,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
 	}
 
 	/* We need to wait for a vblank before we can disable the plane. */
-	intel_wait_for_vblank(dev, crtc->pipe);
+	intel_wait_for_vblank(dev_priv, crtc->pipe);
 }
 
 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
@@ -4984,7 +4998,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
 	 * FIXME: Need to fix the logic to work when we turn off all planes
 	 * but leave the pipe running.
 	 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
 	/* Underruns don't always raise interrupts, so check manually. */
@@ -5007,7 +5021,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
 	 * FIXME: Need to fix the logic to work when we turn off all planes
 	 * but leave the pipe running.
 	 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
 	/*
@@ -5039,10 +5053,10 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
 	 * event which is after the vblank start event, so we need to have a
 	 * wait-for-vblank between disabling the plane and the pipe.
 	 */
-	if (HAS_GMCH_DISPLAY(dev)) {
+	if (HAS_GMCH_DISPLAY(dev_priv)) {
 		intel_set_memory_cxsr(dev_priv, false);
 		dev_priv->wm.vlv.cxsr = false;
-		intel_wait_for_vblank(dev, pipe);
+		intel_wait_for_vblank(dev_priv, pipe);
 	}
 }
 
@@ -5061,7 +5075,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 	crtc->wm.cxsr_allowed = true;
 
 	if (pipe_config->update_wm_post && pipe_config->base.active)
-		intel_update_watermarks(&crtc->base);
+		intel_update_watermarks(crtc);
 
 	if (old_pri_state) {
 		struct intel_plane_state *primary_state =
@@ -5104,7 +5118,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 			intel_pre_disable_primary(&crtc->base);
 	}
 
-	if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
+	if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
 		crtc->wm.cxsr_allowed = false;
 
 		/*
@@ -5119,7 +5133,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 		if (old_crtc_state->base.active) {
 			intel_set_memory_cxsr(dev_priv, false);
 			dev_priv->wm.vlv.cxsr = false;
-			intel_wait_for_vblank(dev, crtc->pipe);
+			intel_wait_for_vblank(dev_priv, crtc->pipe);
 		}
 	}
 
@@ -5132,7 +5146,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 	 */
 	if (pipe_config->disable_lp_wm) {
 		ilk_disable_lp_wm(dev);
-		intel_wait_for_vblank(dev, crtc->pipe);
+		intel_wait_for_vblank(dev_priv, crtc->pipe);
 	}
 
 	/*
@@ -5159,7 +5173,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 	if (dev_priv->display.initial_watermarks != NULL)
 		dev_priv->display.initial_watermarks(pipe_config);
 	else if (pipe_config->update_wm_pre)
-		intel_update_watermarks(&crtc->base);
+		intel_update_watermarks(crtc);
 }
 
 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
@@ -5382,12 +5396,12 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_encoders_enable(crtc, pipe_config, old_state);
 
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_CPT(dev_priv))
 		cpt_verify_modeset(dev, intel_crtc->pipe);
 
 	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
 	if (intel_crtc->config->has_pch_encoder)
-		intel_wait_for_vblank(dev, pipe);
+		intel_wait_for_vblank(dev_priv, pipe);
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
@@ -5395,7 +5409,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 /* IPS only exists on ULT machines and is tied to pipe A. */
 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 {
-	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
+	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
 }
 
 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -5479,7 +5493,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	if (dev_priv->display.initial_watermarks != NULL)
 		dev_priv->display.initial_watermarks(pipe_config);
 	else
-		intel_update_watermarks(crtc);
+		intel_update_watermarks(intel_crtc);
 
 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
 	if (!transcoder_is_dsi(cpu_transcoder))
@@ -5497,8 +5511,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_encoders_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->has_pch_encoder) {
-		intel_wait_for_vblank(dev, pipe);
-		intel_wait_for_vblank(dev, pipe);
+		intel_wait_for_vblank(dev_priv, pipe);
+		intel_wait_for_vblank(dev_priv, pipe);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      true);
@@ -5507,9 +5521,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */
 	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
-	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
-		intel_wait_for_vblank(dev, hsw_workaround_pipe);
-		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
 	}
 }
 
@@ -5564,7 +5578,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	if (intel_crtc->config->has_pch_encoder) {
 		ironlake_disable_pch_transcoder(dev_priv, pipe);
 
-		if (HAS_PCH_CPT(dev)) {
+		if (HAS_PCH_CPT(dev_priv)) {
 			i915_reg_t reg;
 			u32 temp;
 
@@ -5698,13 +5712,13 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
 {
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
 	struct intel_digital_port *intel_dig_port;
 
 	switch (intel_encoder->type) {
 	case INTEL_OUTPUT_UNKNOWN:
 		/* Only DDI platforms should ever use this output type */
-		WARN_ON_ONCE(!HAS_DDI(dev));
+		WARN_ON_ONCE(!HAS_DDI(dev_priv));
 	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_HDMI:
 	case INTEL_OUTPUT_EDP:
@@ -5725,7 +5739,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
 enum intel_display_power_domain
 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
 {
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
 	struct intel_digital_port *intel_dig_port;
 
 	switch (intel_encoder->type) {
@@ -5738,7 +5752,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
 		 * what's the status of the given connectors, play safe and
 		 * run the DP detection too.
 		 */
-		WARN_ON_ONCE(!HAS_DDI(dev));
+		WARN_ON_ONCE(!HAS_DDI(dev_priv));
 	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_EDP:
 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5830,11 +5844,9 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 
 static int skl_calc_cdclk(int max_pixclk, int vco);
 
-static void intel_update_max_cdclk(struct drm_device *dev)
+static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
 		int max_cdclk, vco;
 
@@ -5856,9 +5868,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
 			max_cdclk = 308571;
 
 		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		dev_priv->max_cdclk_freq = 624000;
-	} else if (IS_BROADWELL(dev))  {
+	} else if (IS_BROADWELL(dev_priv))  {
 		/*
 		 * FIXME with extra cooling we can allow
 		 * 540 MHz for ULX and 675 Mhz for ULT.
@@ -5867,15 +5879,15 @@ static void intel_update_max_cdclk(struct drm_device *dev)
 		 */
 		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
 			dev_priv->max_cdclk_freq = 450000;
-		else if (IS_BDW_ULX(dev))
+		else if (IS_BDW_ULX(dev_priv))
 			dev_priv->max_cdclk_freq = 450000;
-		else if (IS_BDW_ULT(dev))
+		else if (IS_BDW_ULT(dev_priv))
 			dev_priv->max_cdclk_freq = 540000;
 		else
 			dev_priv->max_cdclk_freq = 675000;
-	} else if (IS_CHERRYVIEW(dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->max_cdclk_freq = 320000;
-	} else if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		dev_priv->max_cdclk_freq = 400000;
 	} else {
 		/* otherwise assume cdclk is fixed */
@@ -5891,11 +5903,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
 			 dev_priv->max_dotclk_freq);
 }
 
-static void intel_update_cdclk(struct drm_device *dev)
+static void intel_update_cdclk(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
 
 	if (INTEL_GEN(dev_priv) >= 9)
 		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
@@ -6056,14 +6066,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
 		return;
 	}
 
-	intel_update_cdclk(&dev_priv->drm);
+	intel_update_cdclk(dev_priv);
 }
 
 static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
 	u32 cdctl, expected;
 
-	intel_update_cdclk(&dev_priv->drm);
+	intel_update_cdclk(dev_priv);
 
 	if (dev_priv->cdclk_pll.vco == 0 ||
 	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6196,7 +6206,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
 	dev_priv->skl_preferred_vco_freq = vco;
 
 	if (changed)
-		intel_update_max_cdclk(&dev_priv->drm);
+		intel_update_max_cdclk(dev_priv);
 }
 
 static void
@@ -6282,7 +6292,6 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	u32 freq_select, pcu_ack;
 
 	WARN_ON((cdclk == 24000) != (vco == 0));
@@ -6333,7 +6342,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	intel_update_cdclk(dev);
+	intel_update_cdclk(dev_priv);
 }
 
 static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
@@ -6380,7 +6389,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
 		goto sanitize;
 
-	intel_update_cdclk(&dev_priv->drm);
+	intel_update_cdclk(dev_priv);
 	/* Is PLL enabled and locked ? */
 	if (dev_priv->cdclk_pll.vco == 0 ||
 	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6414,7 +6423,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val, cmd;
 
-	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+	WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
 					!= dev_priv->cdclk_freq);
 
 	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
@@ -6471,7 +6480,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 
 	mutex_unlock(&dev_priv->sb_lock);
 
-	intel_update_cdclk(dev);
+	intel_update_cdclk(dev_priv);
 }
 
 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -6479,7 +6488,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val, cmd;
 
-	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+	WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
 						!= dev_priv->cdclk_freq);
 
 	switch (cdclk) {
@@ -6512,7 +6521,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	intel_update_cdclk(dev);
+	intel_update_cdclk(dev_priv);
 }
 
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -6675,7 +6684,7 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 	 */
 	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		cherryview_set_cdclk(dev, req_cdclk);
 	else
 		valleyview_set_cdclk(dev, req_cdclk);
@@ -6703,7 +6712,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_set_pipe_timings(intel_crtc);
 	intel_set_pipe_src_size(intel_crtc);
 
-	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
 		struct drm_i915_private *dev_priv = to_i915(dev);
 
 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
@@ -6718,7 +6727,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		chv_prepare_pll(intel_crtc, intel_crtc->config);
 		chv_enable_pll(intel_crtc, intel_crtc->config);
 	} else {
@@ -6732,7 +6741,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_color_load_luts(&pipe_config->base);
 
-	intel_update_watermarks(crtc);
+	intel_update_watermarks(intel_crtc);
 	intel_enable_pipe(intel_crtc);
 
 	assert_vblank_disabled(crtc);
@@ -6774,7 +6783,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_crtc->active = true;
 
-	if (!IS_GEN2(dev))
+	if (!IS_GEN2(dev_priv))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6785,7 +6794,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_color_load_luts(&pipe_config->base);
 
-	intel_update_watermarks(crtc);
+	intel_update_watermarks(intel_crtc);
 	intel_enable_pipe(intel_crtc);
 
 	assert_vblank_disabled(crtc);
@@ -6822,8 +6831,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
 	 * wait for planes to fully turn off before disabling the pipe.
 	 */
-	if (IS_GEN2(dev))
-		intel_wait_for_vblank(dev, pipe);
+	if (IS_GEN2(dev_priv))
+		intel_wait_for_vblank(dev_priv, pipe);
 
 	intel_encoders_disable(crtc, old_crtc_state, old_state);
 
@@ -6837,9 +6846,9 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
 	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
-		if (IS_CHERRYVIEW(dev))
+		if (IS_CHERRYVIEW(dev_priv))
 			chv_disable_pll(dev_priv, pipe);
-		else if (IS_VALLEYVIEW(dev))
+		else if (IS_VALLEYVIEW(dev_priv))
 			vlv_disable_pll(dev_priv, pipe);
 		else
 			i9xx_disable_pll(intel_crtc);
@@ -6847,7 +6856,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 
 	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 
-	if (!IS_GEN2(dev))
+	if (!IS_GEN2(dev_priv))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 }
 
@@ -6885,7 +6894,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 
 	dev_priv->display.crtc_disable(crtc_state, state);
 
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
 		      crtc->base.id, crtc->name);
@@ -6901,7 +6910,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 		encoder->base.crtc = NULL;
 
 	intel_fbc_disable(intel_crtc);
-	intel_update_watermarks(crtc);
+	intel_update_watermarks(intel_crtc);
 	intel_disable_shared_dpll(intel_crtc);
 
 	domains = intel_crtc->enabled_power_domains;
@@ -7027,6 +7036,7 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 				     struct intel_crtc_state *pipe_config)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_atomic_state *state = pipe_config->base.state;
 	struct intel_crtc *other_crtc;
 	struct intel_crtc_state *other_crtc_state;
@@ -7039,7 +7049,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 		return -EINVAL;
 	}
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		if (pipe_config->fdi_lanes > 2) {
 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
 				      pipe_config->fdi_lanes);
@@ -7060,7 +7070,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 		if (pipe_config->fdi_lanes <= 2)
 			return 0;
 
-		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
+		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
 		other_crtc_state =
 			intel_atomic_get_crtc_state(state, other_crtc);
 		if (IS_ERR(other_crtc_state))
@@ -7079,7 +7089,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 			return -EINVAL;
 		}
 
-		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
+		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
 		other_crtc_state =
 			intel_atomic_get_crtc_state(state, other_crtc);
 		if (IS_ERR(other_crtc_state))
@@ -7224,11 +7234,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
 	 */
-	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
 		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
 		return -EINVAL;
 
-	if (HAS_IPS(dev))
+	if (HAS_IPS(dev_priv))
 		hsw_compute_ips_config(crtc, pipe_config);
 
 	if (pipe_config->has_pch_encoder)
@@ -7237,10 +7247,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	return 0;
 }
 
-static int skylake_get_display_clock_speed(struct drm_device *dev)
+static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	uint32_t cdctl;
+	u32 cdctl;
 
 	skl_dpll0_update(dev_priv);
 
@@ -7299,9 +7308,8 @@ static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
 		dev_priv->cdclk_pll.ref;
 }
 
-static int broxton_get_display_clock_speed(struct drm_device *dev)
+static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 divider;
 	int div, vco;
 
@@ -7334,9 +7342,8 @@ static int broxton_get_display_clock_speed(struct drm_device *dev)
 	return DIV_ROUND_CLOSEST(vco, div);
 }
 
-static int broadwell_get_display_clock_speed(struct drm_device *dev)
+static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t lcpll = I915_READ(LCPLL_CTL);
 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
@@ -7354,9 +7361,8 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
 		return 675000;
 }
 
-static int haswell_get_display_clock_speed(struct drm_device *dev)
+static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t lcpll = I915_READ(LCPLL_CTL);
 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
@@ -7366,41 +7372,41 @@ static int haswell_get_display_clock_speed(struct drm_device *dev)
 		return 450000;
 	else if (freq == LCPLL_CLK_FREQ_450)
 		return 450000;
-	else if (IS_HSW_ULT(dev))
+	else if (IS_HSW_ULT(dev_priv))
 		return 337500;
 	else
 		return 540000;
 }
 
-static int valleyview_get_display_clock_speed(struct drm_device *dev)
+static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
+	return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
 				      CCK_DISPLAY_CLOCK_CONTROL);
 }
 
-static int ilk_get_display_clock_speed(struct drm_device *dev)
+static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
 	return 450000;
 }
 
-static int i945_get_display_clock_speed(struct drm_device *dev)
+static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
 	return 400000;
 }
 
-static int i915_get_display_clock_speed(struct drm_device *dev)
+static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
 	return 333333;
 }
 
-static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
 	return 200000;
 }
 
-static int pnv_get_display_clock_speed(struct drm_device *dev)
+static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u16 gcfgc = 0;
 
 	pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7423,9 +7429,9 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
 	}
 }
 
-static int i915gm_get_display_clock_speed(struct drm_device *dev)
+static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u16 gcfgc = 0;
 
 	pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7443,14 +7449,14 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
 	}
 }
 
-static int i865_get_display_clock_speed(struct drm_device *dev)
+static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
 	return 266667;
 }
 
-static int i85x_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u16 hpllcc = 0;
 
 	/*
@@ -7486,14 +7492,13 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
 	return 0;
 }
 
-static int i830_get_display_clock_speed(struct drm_device *dev)
+static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
 	return 133333;
 }
 
-static unsigned int intel_hpll_vco(struct drm_device *dev)
+static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	static const unsigned int blb_vco[8] = {
 		[0] = 3200000,
 		[1] = 4000000,
@@ -7536,20 +7541,20 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
 	uint8_t tmp = 0;
 
 	/* FIXME other chipsets? */
-	if (IS_GM45(dev))
+	if (IS_GM45(dev_priv))
 		vco_table = ctg_vco;
-	else if (IS_G4X(dev))
+	else if (IS_G4X(dev_priv))
 		vco_table = elk_vco;
-	else if (IS_CRESTLINE(dev))
+	else if (IS_CRESTLINE(dev_priv))
 		vco_table = cl_vco;
-	else if (IS_PINEVIEW(dev))
+	else if (IS_PINEVIEW(dev_priv))
 		vco_table = pnv_vco;
-	else if (IS_G33(dev))
+	else if (IS_G33(dev_priv))
 		vco_table = blb_vco;
 	else
 		return 0;
 
-	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+	tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
 
 	vco = vco_table[tmp & 0x7];
 	if (vco == 0)
@@ -7560,10 +7565,10 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
 	return vco;
 }
 
-static int gm45_get_display_clock_speed(struct drm_device *dev)
+static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev->pdev;
-	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
 	uint16_t tmp = 0;
 
 	pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7583,14 +7588,14 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
 	}
 }
 
-static int i965gm_get_display_clock_speed(struct drm_device *dev)
+static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	static const uint8_t div_3200[] = { 16, 10,  8 };
 	static const uint8_t div_4000[] = { 20, 12, 10 };
 	static const uint8_t div_5333[] = { 24, 16, 14 };
 	const uint8_t *div_table;
-	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
 	uint16_t tmp = 0;
 
 	pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7621,15 +7626,15 @@ static int i965gm_get_display_clock_speed(struct drm_device *dev)
 	return 200000;
 }
 
-static int g33_get_display_clock_speed(struct drm_device *dev)
+static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
 	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
 	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
 	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
 	const uint8_t *div_table;
-	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
 	uint16_t tmp = 0;
 
 	pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7718,10 +7723,10 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
 				     struct intel_crtc_state *crtc_state,
 				     struct dpll *reduced_clock)
 {
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 fp, fp2 = 0;
 
-	if (IS_PINEVIEW(dev)) {
+	if (IS_PINEVIEW(dev_priv)) {
 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
 		if (reduced_clock)
 			fp2 = pnv_dpll_compute_fp(reduced_clock);
@@ -7803,8 +7808,8 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 		 * for gen < 8) and if DRRS is supported (to make sure the
 		 * registers are not unnecessarily accessed).
 		 */
-		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
-			crtc->config->has_drrs) {
+		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
+		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
 			I915_WRITE(PIPE_DATA_M2(transcoder),
 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -8091,11 +8096,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
  * in cases where we need the PLL enabled even when @pipe is not going to
  * be enabled.
  */
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
 		     const struct dpll *dpll)
 {
-	struct intel_crtc *crtc =
-		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	struct intel_crtc_state *pipe_config;
 
 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
@@ -8106,7 +8110,7 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
 	pipe_config->pixel_multiplier = 1;
 	pipe_config->dpll = *dpll;
 
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		chv_compute_dpll(crtc, pipe_config);
 		chv_prepare_pll(crtc, pipe_config);
 		chv_enable_pll(crtc, pipe_config);
@@ -8129,20 +8133,19 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
  * Disable the PLL for @pipe, undoing vlv_force_pll_on(). To be used in
  * cases where we needed the PLL enabled even when @pipe was not enabled.
  */
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	if (IS_CHERRYVIEW(dev))
-		chv_disable_pll(to_i915(dev), pipe);
+	if (IS_CHERRYVIEW(dev_priv))
+		chv_disable_pll(dev_priv, pipe);
 	else
-		vlv_disable_pll(to_i915(dev), pipe);
+		vlv_disable_pll(dev_priv, pipe);
 }
 
 static void i9xx_compute_dpll(struct intel_crtc *crtc,
 			      struct intel_crtc_state *crtc_state,
 			      struct dpll *reduced_clock)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 dpll;
 	struct dpll *clock = &crtc_state->dpll;
 
@@ -8155,7 +8158,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
 
-	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
 		dpll |= (crtc_state->pixel_multiplier - 1)
 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
 	}
@@ -8168,11 +8171,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 		dpll |= DPLL_SDVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
-	if (IS_PINEVIEW(dev))
+	if (IS_PINEVIEW(dev_priv))
 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
 	else {
 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-		if (IS_G4X(dev) && reduced_clock)
+		if (IS_G4X(dev_priv) && reduced_clock)
 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 	}
 	switch (clock->p2) {
@@ -8189,7 +8192,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 		break;
 	}
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (INTEL_GEN(dev_priv) >= 4)
 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
 	if (crtc_state->sdvo_tv_clock)
@@ -8203,7 +8206,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 	dpll |= DPLL_VCO_ENABLE;
 	crtc_state->dpll_hw_state.dpll = dpll;
 
-	if (INTEL_INFO(dev)->gen >= 4) {
+	if (INTEL_GEN(dev_priv) >= 4) {
 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
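
The P1 post divider a few hunks up is programmed as a one-hot bitmask rather than a plain binary field: dpll |= (1 << (clock->p1 - 1)) << SHIFT, which the readout path (i9xx_crtc_clock_get(), further down in this file) inverts with ffs(). A sketch of the round trip, using a placeholder shift value rather than the real register layout:

#include <stdint.h>
#include <stdio.h>

#define P1_POST_DIV_SHIFT 16	/* placeholder, not the real shift */

static uint32_t encode_p1(int p1)
{
	/* p1 = 1..8 becomes a one-hot bit: 1 -> 0x1, 2 -> 0x2, 3 -> 0x4 ... */
	return (1u << (p1 - 1)) << P1_POST_DIV_SHIFT;
}

static int decode_p1(uint32_t dpll)
{
	uint32_t field = (dpll >> P1_POST_DIV_SHIFT) & 0xff;
	int p1 = 0;

	while (field) {	/* position of the set bit, i.e. ffs() */
		field >>= 1;
		p1++;
	}
	return p1;
}

int main(void)
{
	for (int p1 = 1; p1 <= 8; p1++)
		printf("p1=%d reg=0x%08x back=%d\n",
		       p1, encode_p1(p1), decode_p1(encode_p1(p1)));
	return 0;
}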
@@ -8234,7 +8237,8 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 			dpll |= PLL_P2_DIVIDE_BY_4;
 	}
 
-	if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+	if (!IS_I830(dev_priv) &&
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
 		dpll |= DPLL_DVO_2X_MODE;
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
@@ -8303,7 +8307,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
 	 * bits. */
-	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
 	    (pipe == PIPE_B || pipe == PIPE_C))
 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
 
@@ -8413,7 +8417,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 		pipeconf |= PIPECONF_DOUBLE_WIDE;
 
 	/* only g4x and later have fancy bpc/dither controls */
-	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+	    IS_CHERRYVIEW(dev_priv)) {
 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
 		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
 			pipeconf |= PIPECONF_DITHER_EN |
@@ -8453,7 +8458,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 	} else
 		pipeconf |= PIPECONF_PROGRESSIVE;
 
-	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	     intel_crtc->config->limited_color_range)
 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
 
@@ -8657,7 +8662,8 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp;
 
-	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+	if (INTEL_GEN(dev_priv) <= 3 &&
+	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
 		return;
 
 	tmp = I915_READ(PFIT_CONTROL);
@@ -8829,7 +8835,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	if (!(tmp & PIPECONF_ENABLE))
 		goto out;
 
-	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+	    IS_CHERRYVIEW(dev_priv)) {
 		switch (tmp & PIPECONF_BPC_MASK) {
 		case PIPECONF_6BPC:
 			pipe_config->pipe_bpp = 18;
@@ -8845,7 +8852,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 		}
 	}
 
-	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
 		pipe_config->limited_color_range = true;
 
@@ -8859,7 +8866,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		/* No way to read it out on pipes B and C */
-		if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
 		else
 			tmp = I915_READ(DPLL_MD(crtc->pipe));
@@ -8867,7 +8874,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
 		pipe_config->dpll_hw_state.dpll_md = tmp;
-	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+		   IS_G33(dev_priv)) {
 		tmp = I915_READ(DPLL(crtc->pipe));
 		pipe_config->pixel_multiplier =
 			((tmp & SDVO_MULTIPLIER_MASK)
@@ -8879,13 +8887,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 		pipe_config->pixel_multiplier = 1;
 	}
 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
-	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
 		/*
 		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
 		 * on 830. Filter it out here so that we don't
 		 * report errors due to that.
 		 */
-		if (IS_I830(dev))
+		if (IS_I830(dev_priv))
 			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
 
 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
@@ -8897,9 +8905,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 						     DPLL_PORTB_READY_MASK);
 	}
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		chv_crtc_clock_get(crtc, pipe_config);
-	else if (IS_VALLEYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		vlv_crtc_clock_get(crtc, pipe_config);
 	else
 		i9xx_crtc_clock_get(crtc, pipe_config);
@@ -8950,7 +8958,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 		}
 	}
 
-	if (HAS_PCH_IBX(dev)) {
+	if (HAS_PCH_IBX(dev_priv)) {
 		has_ck505 = dev_priv->vbt.display_clock_mode;
 		can_ssc = has_ck505;
 	} else {
@@ -9198,7 +9206,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
 
 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
 		with_spread = true;
-	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
+	if (WARN(HAS_PCH_LPT_LP(dev_priv) && with_fdi,
+		 "LP PCH doesn't have FDI\n"))
 		with_fdi = false;
 
 	mutex_lock(&dev_priv->sb_lock);
@@ -9221,7 +9230,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
 		}
 	}
 
-	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9237,7 +9246,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
 
 	mutex_lock(&dev_priv->sb_lock);
 
-	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9345,9 +9354,11 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
  */
 void intel_init_pch_refclk(struct drm_device *dev)
 {
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
 		ironlake_init_pch_refclk(dev);
-	else if (HAS_PCH_LPT(dev))
+	else if (HAS_PCH_LPT(dev_priv))
 		lpt_init_pch_refclk(dev);
 }
 
@@ -9476,7 +9487,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if ((intel_panel_use_ssc(dev_priv) &&
 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
-		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
 			factor = 25;
 	} else if (crtc_state->sdvo_tv_clock)
 		factor = 20;
@@ -9836,7 +9847,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
 		/* We currently do not free assignments of panel fitters on
 		 * ivb/hsw (since we don't use the higher upscaling modes which
 		 * differentiate them) so just WARN about this case for now. */
-		if (IS_GEN7(dev)) {
+		if (IS_GEN7(dev_priv)) {
 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
 				PF_PIPE_SEL_IVB(crtc->pipe));
 		}
@@ -9881,7 +9892,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
 
 	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		offset = I915_READ(DSPOFFSET(pipe));
 	} else {
 		if (plane_config->tiling)
@@ -10025,7 +10036,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
 	     "CPU PWM1 enabled\n");
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev_priv))
 		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
 		     "CPU PWM2 enabled\n");
 	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
@@ -10045,9 +10056,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 
 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev_priv))
 		return I915_READ(D_COMP_HSW);
 	else
 		return I915_READ(D_COMP_BDW);
@@ -10055,9 +10064,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 {
-	struct drm_device *dev = &dev_priv->drm;
-
-	if (IS_HASWELL(dev)) {
+	if (IS_HASWELL(dev_priv)) {
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 					    val))
@@ -10172,7 +10179,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	}
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-	intel_update_cdclk(&dev_priv->drm);
+	intel_update_cdclk(dev_priv);
 }
 
 /*
@@ -10205,7 +10212,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 
 	DRM_DEBUG_KMS("Enabling package C8+\n");
 
-	if (HAS_PCH_LPT_LP(dev)) {
+	if (HAS_PCH_LPT_LP(dev_priv)) {
 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10225,7 +10232,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 	hsw_restore_lcpll(dev_priv);
 	lpt_init_pch_refclk(dev);
 
-	if (HAS_PCH_LPT_LP(dev)) {
+	if (HAS_PCH_LPT_LP(dev_priv)) {
 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10242,6 +10249,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 	bxt_set_cdclk(to_i915(dev), req_cdclk);
 }
 
+static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+					  int pixel_rate)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+	if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+		pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+	/* BSpec says "Do not use DisplayPort with CDCLK less than
+	 * 432 MHz, audio enabled, port width x4, and link rate
+	 * HBR2 (5.4 GHz), or else there may be audio corruption or
+	 * screen corruption."
+	 */
+	if (intel_crtc_has_dp_encoder(crtc_state) &&
+	    crtc_state->has_audio &&
+	    crtc_state->port_clock >= 540000 &&
+	    crtc_state->lane_count == 4)
+		pixel_rate = max(432000, pixel_rate);
+
+	return pixel_rate;
+}
+
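+
Both adjustments are plain integer floors. A worked sketch of the arithmetic with invented sample clocks (DIV_ROUND_UP as in the kernel; the platform and port checks are reduced to flags):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int max(int a, int b) { return a > b ? a : b; }

/* mirrors bdw_adjust_min_pipe_pixel_rate: scale for the 95% IPS headroom,
 * then apply the 432 MHz floor for the HBR2 x4 + audio case */
static int adjust(int pixel_rate, int ips, int dp_hbr2_x4_audio)
{
	if (ips)
		pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

	if (dp_hbr2_x4_audio)
		pixel_rate = max(432000, pixel_rate);

	return pixel_rate;
}

int main(void)
{
	/* 4k@60 is roughly 533 MHz; with IPS it demands ~561 MHz of cdclk */
	printf("%d\n", adjust(533250, 1, 0));	/* 561316 */
	/* a small mode with DP audio at HBR2 x4 is floored to 432 MHz */
	printf("%d\n", adjust(148500, 0, 1));	/* 432000 */
	return 0;
}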
 /* compute the max rate for new configuration */
 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 {
@@ -10267,9 +10297,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 
 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
 
-		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
-			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+		if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+			pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+								    pixel_rate);
 
 		intel_state->min_pixclk[i] = pixel_rate;
 	}
@@ -10352,7 +10382,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
 
 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
 
-	intel_update_cdclk(dev);
+	intel_update_cdclk(dev_priv);
 
 	WARN(cdclk != dev_priv->cdclk_freq,
 	     "cdclk requested %d kHz but got %d kHz\n",
@@ -10649,9 +10679,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 
 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
 	else
 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10717,10 +10747,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
 
 	if (INTEL_INFO(dev)->gen >= 9) {
-		skl_init_scalers(dev, crtc, pipe_config);
-	}
+		skl_init_scalers(dev_priv, crtc, pipe_config);
 
-	if (INTEL_INFO(dev)->gen >= 9) {
 		pipe_config->scaler_state.scaler_id = -1;
 		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
 	}
@@ -10734,7 +10762,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 			ironlake_get_pfit_config(crtc, pipe_config);
 	}
 
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev_priv))
 		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
 			(I915_READ(IPS_CTL) & IPS_ENABLE);
 
@@ -10822,12 +10850,15 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+	const struct skl_plane_wm *p_wm =
+		&cstate->wm.skl.optimal.planes[PLANE_CURSOR];
 	int pipe = intel_crtc->pipe;
 	uint32_t cntl = 0;
 
 	if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
-		skl_write_cursor_wm(intel_crtc, wm);
+		skl_write_cursor_wm(intel_crtc, p_wm, &wm->ddb);
 
 	if (plane_state && plane_state->base.visible) {
 		cntl = MCURSOR_GAMMA_ENABLE;
@@ -10847,7 +10878,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
 		}
 		cntl |= pipe << 28; /* Connect to correct pipe */
 
-		if (HAS_DDI(dev))
+		if (HAS_DDI(dev_priv))
 			cntl |= CURSOR_PIPE_CSC_ENABLE;
 
 		if (plane_state->base.rotation == DRM_ROTATE_180)
@@ -10895,7 +10926,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 		pos |= y << CURSOR_Y_SHIFT;
 
 		/* ILK+ do this automagically */
-		if (HAS_GMCH_DISPLAY(dev) &&
+		if (HAS_GMCH_DISPLAY(dev_priv) &&
 		    plane_state->base.rotation == DRM_ROTATE_180) {
 			base += (plane_state->base.crtc_h *
 				 plane_state->base.crtc_w - 1) * 4;
@@ -10904,13 +10935,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 
 	I915_WRITE(CURPOS(pipe), pos);
 
-	if (IS_845G(dev) || IS_I865G(dev))
+	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
 		i845_update_cursor(crtc, base, plane_state);
 	else
 		i9xx_update_cursor(crtc, base, plane_state);
 }
 
-static bool cursor_size_ok(struct drm_device *dev,
+static bool cursor_size_ok(struct drm_i915_private *dev_priv,
 			   uint32_t width, uint32_t height)
 {
 	if (width == 0 || height == 0)
@@ -10922,11 +10953,11 @@ static bool cursor_size_ok(struct drm_device *dev,
 	 * the precision of the register. Everything else requires
 	 * square cursors, limited to a few power-of-two sizes.
 	 */
-	if (IS_845G(dev) || IS_I865G(dev)) {
+	if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
 		if ((width & 63) != 0)
 			return false;
 
-		if (width > (IS_845G(dev) ? 64 : 512))
+		if (width > (IS_845G(dev_priv) ? 64 : 512))
 			return false;
 
 		if (height > 1023)
@@ -10935,7 +10966,7 @@ static bool cursor_size_ok(struct drm_device *dev,
 		switch (width | height) {
 		case 256:
 		case 128:
-			if (IS_GEN2(dev))
+			if (IS_GEN2(dev_priv))
 				return false;
 		case 64:
 			break;
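
The two cursor families obey very different rules: 845/865 accept any width that is a multiple of 64 (capped at 64 or 512 respectively) with heights up to 1023, while everything newer wants square power-of-two cursors, with 128/256 additionally off-limits on gen2. A standalone sketch of the same predicate, with the platform checks reduced to flags:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool cursor_size_ok(bool is_845, bool is_865, bool is_gen2,
			   uint32_t w, uint32_t h)
{
	if (w == 0 || h == 0)
		return false;

	if (is_845 || is_865) {
		if (w & 63)			/* width in 64-pixel steps */
			return false;
		if (w > (is_845 ? 64 : 512))
			return false;
		if (h > 1023)
			return false;
		return true;
	}

	if (w != h)				/* square only */
		return false;

	switch (w) {
	case 256:
	case 128:
		return !is_gen2;
	case 64:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d\n", cursor_size_ok(false, true, false, 256, 64));  /* 1 */
	printf("%d\n", cursor_size_ok(false, false, true, 128, 128)); /* 0 */
	printf("%d\n", cursor_size_ok(false, false, false, 64, 64));  /* 1 */
	return 0;
}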
@@ -11029,7 +11060,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
 
 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
 	if (IS_ERR(fb))
-		i915_gem_object_put_unlocked(obj);
+		i915_gem_object_put(obj);
 
 	return fb;
 }
@@ -11114,6 +11145,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_framebuffer *fb;
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
@@ -11266,13 +11298,18 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 	old->restore_state = restore_state;
 
 	/* let the connector get through one full cycle before testing */
-	intel_wait_for_vblank(dev, intel_crtc->pipe);
+	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
 	return true;
 
 fail:
-	drm_atomic_state_free(state);
-	drm_atomic_state_free(restore_state);
-	restore_state = state = NULL;
+	if (state) {
+		drm_atomic_state_put(state);
+		state = NULL;
+	}
+	if (restore_state) {
+		drm_atomic_state_put(restore_state);
+		restore_state = NULL;
+	}
 
 	if (ret == -EDEADLK) {
 		drm_modeset_backoff(ctx);
@@ -11300,10 +11337,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 		return;
 
 	ret = drm_atomic_commit(state);
-	if (ret) {
+	if (ret)
 		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
-		drm_atomic_state_free(state);
-	}
+	drm_atomic_state_put(state);
 }
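
The drm_atomic_state_free() -> drm_atomic_state_put() conversions in this and the surrounding hunks all follow from atomic state becoming reference counted: callers no longer decide whether to free, they simply drop their reference on every path and the last put frees. A hand-rolled counter standing in for the real kref-backed refcount:

#include <stdio.h>
#include <stdlib.h>

struct state {
	int ref;	/* stands in for drm_atomic_state.ref */
};

static struct state *state_alloc(void)
{
	struct state *s = calloc(1, sizeof(*s));

	if (!s)
		abort();
	s->ref = 1;	/* caller owns the initial reference */
	return s;
}

static void state_get(struct state *s) { s->ref++; }

static void state_put(struct state *s)
{
	if (--s->ref == 0) {
		printf("freed\n");
		free(s);
	}
}

int main(void)
{
	struct state *s = state_alloc();

	state_get(s);	/* e.g. a queued commit keeps its own reference */
	state_put(s);	/* the caller unconditionally drops its reference */
	state_put(s);	/* last user frees -> "freed" */
	return 0;
}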
 
 static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11314,9 +11350,9 @@ static int i9xx_pll_refclk(struct drm_device *dev,
 
 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
 		return dev_priv->vbt.lvds_ssc_freq;
-	else if (HAS_PCH_SPLIT(dev))
+	else if (HAS_PCH_SPLIT(dev_priv))
 		return 120000;
-	else if (!IS_GEN2(dev))
+	else if (!IS_GEN2(dev_priv))
 		return 96000;
 	else
 		return 48000;
@@ -11341,7 +11377,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 		fp = pipe_config->dpll_hw_state.fp1;
 
 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
-	if (IS_PINEVIEW(dev)) {
+	if (IS_PINEVIEW(dev_priv)) {
 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
 	} else {
@@ -11349,8 +11385,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
 	}
 
-	if (!IS_GEN2(dev)) {
-		if (IS_PINEVIEW(dev))
+	if (!IS_GEN2(dev_priv)) {
+		if (IS_PINEVIEW(dev_priv))
 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
 		else
@@ -11372,12 +11408,12 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 			return;
 		}
 
-		if (IS_PINEVIEW(dev))
+		if (IS_PINEVIEW(dev_priv))
 			port_clock = pnv_calc_dpll_params(refclk, &clock);
 		else
 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	} else {
-		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
 
 		if (is_lvds) {
@@ -11578,7 +11614,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
 	 * really needed there. But since ctg has the registers,
 	 * include it in the check anyway.
 	 */
-	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
 		return true;
 
 	/*
@@ -11641,8 +11677,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
 void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	struct intel_flip_work *work;
 	unsigned long flags;
 
@@ -11655,12 +11690,12 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
 	 * lost pageflips) so needs the full irqsave spinlocks.
 	 */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	work = intel_crtc->flip_work;
+	work = crtc->flip_work;
 
 	if (work != NULL &&
 	    !is_mmio_work(work) &&
-	    pageflip_finished(intel_crtc, work))
-		page_flip_completed(intel_crtc);
+	    pageflip_finished(crtc, work))
+		page_flip_completed(crtc);
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
@@ -11668,8 +11703,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	struct intel_flip_work *work;
 	unsigned long flags;
 
@@ -11682,12 +11716,12 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
 	 * lost pageflips) so needs the full irqsave spinlocks.
 	 */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	work = intel_crtc->flip_work;
+	work = crtc->flip_work;
 
 	if (work != NULL &&
 	    is_mmio_work(work) &&
-	    pageflip_finished(intel_crtc, work))
-		page_flip_completed(intel_crtc);
+	    pageflip_finished(crtc, work))
+		page_flip_completed(crtc);
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
@@ -11848,6 +11882,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
@@ -11876,7 +11911,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		 * 48bits addresses, and we need a NOOP for the batch size to
 		 * stay even.
 		 */
-		if (IS_GEN8(dev))
+		if (IS_GEN8(dev_priv))
 			len += 2;
 	}
 
@@ -11913,7 +11948,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					  DERRMR_PIPEB_PRI_FLIP_DONE |
 					  DERRMR_PIPEC_PRI_FLIP_DONE));
-		if (IS_GEN8(dev))
+		if (IS_GEN8(dev_priv))
 			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		else
@@ -11922,7 +11957,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit_reg(ring, DERRMR);
 		intel_ring_emit(ring,
 				i915_ggtt_offset(req->engine->scratch) + 256);
-		if (IS_GEN8(dev)) {
+		if (IS_GEN8(dev_priv)) {
 			intel_ring_emit(ring, 0);
 			intel_ring_emit(ring, MI_NOOP);
 		}
@@ -11940,8 +11975,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 static bool use_mmio_flip(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_object *obj)
 {
-	struct reservation_object *resv;
-
 	/*
 	 * This is not being used for older platforms, because
 	 * non-availability of flip done interrupt forces us to use
@@ -11963,12 +11996,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 	else if (i915.enable_execlists)
 		return true;
 
-	resv = i915_gem_object_get_dmabuf_resv(obj);
-	if (resv && !reservation_object_test_signaled_rcu(resv, false))
-		return true;
-
-	return engine != i915_gem_active_get_engine(&obj->last_write,
-						    &obj->base.dev->struct_mutex);
+	return engine != i915_gem_object_last_write_engine(obj);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -12041,17 +12069,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 	struct intel_framebuffer *intel_fb =
 		to_intel_framebuffer(crtc->base.primary->fb);
 	struct drm_i915_gem_object *obj = intel_fb->obj;
-	struct reservation_object *resv;
 
-	if (work->flip_queued_req)
-		WARN_ON(i915_wait_request(work->flip_queued_req,
-					  0, NULL, NO_WAITBOOST));
-
-	/* For framebuffer backed by dmabuf, wait for fence */
-	resv = i915_gem_object_get_dmabuf_resv(obj);
-	if (resv)
-		WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
-							    MAX_SCHEDULE_TIMEOUT) < 0);
+	WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
 
 	intel_pipe_update_start(crtc);
 
@@ -12114,8 +12133,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	struct intel_flip_work *work;
 
 	WARN_ON(!in_interrupt());
@@ -12124,19 +12142,19 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
 		return;
 
 	spin_lock(&dev->event_lock);
-	work = intel_crtc->flip_work;
+	work = crtc->flip_work;
 
 	if (work != NULL && !is_mmio_work(work) &&
-	    __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+	    __pageflip_stall_check_cs(dev_priv, crtc, work)) {
 		WARN_ONCE(1,
 			  "Kicking stuck page flip: queued at %d, now %d\n",
-			work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
-		page_flip_completed(intel_crtc);
+			work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
+		page_flip_completed(crtc);
 		work = NULL;
 	}
 
 	if (work != NULL && !is_mmio_work(work) &&
-	    intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+	    intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
 		intel_queue_rps_boost_for_request(work->flip_queued_req);
 	spin_unlock(&dev->event_lock);
 }
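
The stuck-flip test above compares vblank counters with plain unsigned subtraction, which keeps working across counter wraparound. A small demonstration, assuming 32-bit counters as in the driver:

#include <stdint.h>
#include <stdio.h>

/* unsigned subtraction yields the number of vblanks elapsed even when
 * the counter has wrapped in between */
static uint32_t vblanks_since(uint32_t now, uint32_t queued)
{
	return now - queued;
}

int main(void)
{
	/* flip queued just before a 32-bit wrap, checked just after it */
	uint32_t queued = 0xfffffffeu, now = 0x00000001u;

	printf("elapsed = %u, stuck = %d\n",
	       (unsigned)vblanks_since(now, queued),
	       vblanks_since(now, queued) > 1);	/* elapsed = 3, stuck = 1 */
	return 0;
}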
@@ -12241,23 +12259,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	atomic_inc(&intel_crtc->unpin_work_count);
 
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		engine = &dev_priv->engine[BCS];
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		engine = dev_priv->engine[BCS];
 		if (fb->modifier[0] != old_fb->modifier[0])
 			/* vlv: DISPLAY_FLIP fails to change tiling */
 			engine = NULL;
-	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-		engine = &dev_priv->engine[BCS];
+	} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+		engine = dev_priv->engine[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		engine = i915_gem_active_get_engine(&obj->last_write,
-						    &obj->base.dev->struct_mutex);
+		engine = i915_gem_object_last_write_engine(obj);
 		if (engine == NULL || engine->id != RCS)
-			engine = &dev_priv->engine[BCS];
+			engine = dev_priv->engine[BCS];
 	} else {
-		engine = &dev_priv->engine[RCS];
+		engine = dev_priv->engine[RCS];
 	}
 
 	mmio_flip = use_mmio_flip(engine, obj);
@@ -12285,10 +12302,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	if (mmio_flip) {
 		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
-
-		work->flip_queued_req = i915_gem_active_get(&obj->last_write,
-							    &obj->base.dev->struct_mutex);
-		schedule_work(&work->mmio_work);
+		queue_work(system_unbound_wq, &work->mmio_work);
 	} else {
 		request = i915_gem_request_alloc(engine, engine->last_context);
 		if (IS_ERR(request)) {
@@ -12333,7 +12347,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	crtc->primary->fb = old_fb;
 	update_state_fb(crtc->primary);
 
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	drm_framebuffer_unreference(work->old_fb);
 
 	spin_lock_irq(&dev->event_lock);
@@ -12371,8 +12385,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			goto retry;
 		}
 
-		if (ret)
-			drm_atomic_state_free(state);
+		drm_atomic_state_put(state);
 
 		if (ret == 0 && event) {
 			spin_lock_irq(&dev->event_lock);
@@ -12446,7 +12459,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 	struct drm_framebuffer *fb = plane_state->fb;
 	int ret;
 
-	if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
+	if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
 		ret = skl_update_scaler_plane(
 			to_intel_crtc_state(crtc_state),
 			to_intel_plane_state(plane_state));
@@ -12525,7 +12538,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 	 * cstate->update_wm was already set above, so this flag will
 	 * take effect when we commit and program watermarks.
 	 */
-	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
 	    needs_scaling(to_intel_plane_state(plane_state)) &&
 	    !needs_scaling(old_plane_state))
 		pipe_config->disable_lp_wm = true;
@@ -12701,15 +12714,16 @@ static int
 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 			  struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct drm_atomic_state *state;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
 	int bpp, i;
 
-	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+	    IS_CHERRYVIEW(dev_priv))
 		bpp = 10*3;
-	else if (INTEL_INFO(dev)->gen >= 5)
+	else if (INTEL_GEN(dev_priv) >= 5)
 		bpp = 12*3;
 	else
 		bpp = 8*3;
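
pipe_bpp here, as everywhere in this file, counts total bits per pixel, i.e. bits per channel times the three RGB channels, hence 10*3, 12*3 and 8*3. A trivial sketch of the baseline selection with the platform checks reduced to parameters:

#include <stdio.h>

static int baseline_pipe_bpp(int gen, int is_g4x_vlv_chv)
{
	if (is_g4x_vlv_chv)
		return 10 * 3;	/* 30 bpp */
	if (gen >= 5)
		return 12 * 3;	/* 36 bpp */
	return 8 * 3;		/* 24 bpp */
}

int main(void)
{
	printf("g4x: %d, ilk: %d, i965: %d\n",
	       baseline_pipe_bpp(4, 1), baseline_pipe_bpp(5, 0),
	       baseline_pipe_bpp(4, 0));
	return 0;
}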
@@ -12747,6 +12761,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				   const char *context)
 {
 	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_plane *plane;
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *state;
@@ -12808,7 +12823,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
-	if (IS_BROXTON(dev)) {
+	if (IS_BROXTON(dev_priv)) {
 		DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
 			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
@@ -12823,13 +12838,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 			      pipe_config->dpll_hw_state.pll9,
 			      pipe_config->dpll_hw_state.pll10,
 			      pipe_config->dpll_hw_state.pcsdw12);
-	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		DRM_DEBUG_KMS("dpll_hw_state: "
 			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
 			      pipe_config->dpll_hw_state.ctrl1,
 			      pipe_config->dpll_hw_state.cfgcr1,
 			      pipe_config->dpll_hw_state.cfgcr2);
-	} else if (HAS_DDI(dev)) {
+	} else if (HAS_DDI(dev_priv)) {
 		DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
 			      pipe_config->dpll_hw_state.wrpll,
 			      pipe_config->dpll_hw_state.spll);
@@ -12907,7 +12922,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
 		switch (encoder->type) {
 			unsigned int port_mask;
 		case INTEL_OUTPUT_UNKNOWN:
-			if (WARN_ON(!HAS_DDI(dev)))
+			if (WARN_ON(!HAS_DDI(to_i915(dev))))
 				break;
 		case INTEL_OUTPUT_DP:
 		case INTEL_OUTPUT_HDMI:
@@ -13193,6 +13208,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 			  struct intel_crtc_state *pipe_config,
 			  bool adjust)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool ret = true;
 
 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
@@ -13338,8 +13354,8 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 	PIPE_CONF_CHECK_I(pixel_multiplier);
 	PIPE_CONF_CHECK_I(has_hdmi_sink);
-	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
-	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
+	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		PIPE_CONF_CHECK_I(limited_color_range);
 	PIPE_CONF_CHECK_I(has_infoframe);
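
The PIPE_CONF_CHECK_* macros compare one field of the computed state against the value read back from hardware and report mismatches by field name via stringification. A reduced sketch of the pattern outside the kernel, using a hypothetical two-field config:

#include <stdio.h>

struct pipe_config {
	int pixel_multiplier;
	int has_hdmi_sink;
};

/* stringification (#name) reports the field being compared */
#define PIPE_CONF_CHECK_I(name) do {					\
	if (sw.name != hw.name) {					\
		printf("mismatch in " #name " (expected %i, found %i)\n", \
		       sw.name, hw.name);				\
		ok = 0;							\
	}								\
} while (0)

int main(void)
{
	struct pipe_config sw = { .pixel_multiplier = 1, .has_hdmi_sink = 1 };
	struct pipe_config hw = { .pixel_multiplier = 2, .has_hdmi_sink = 1 };
	int ok = 1;

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);

	return ok ? 0 : 1;
}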
 
@@ -13379,7 +13395,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	}
 
 	/* BDW+ don't expose a synchronous way to read the state */
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev_priv))
 		PIPE_CONF_CHECK_I(ips_enabled);
 
 	PIPE_CONF_CHECK_I(double_wide);
@@ -13398,7 +13414,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
 	PIPE_CONF_CHECK_X(dsi_pll.div);
 
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
 	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
@@ -13439,30 +13455,65 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_allocation hw_ddb, *sw_ddb;
-	struct skl_ddb_entry *hw_entry, *sw_entry;
+	struct skl_pipe_wm hw_wm, *sw_wm;
+	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const enum pipe pipe = intel_crtc->pipe;
-	int plane;
+	int plane, level, max_level = ilk_wm_max_level(dev_priv);
 
 	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
 		return;
 
+	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+
 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
 	/* planes */
-	for_each_plane(dev_priv, pipe, plane) {
-		hw_entry = &hw_ddb.plane[pipe][plane];
-		sw_entry = &sw_ddb->plane[pipe][plane];
+	for_each_universal_plane(dev_priv, pipe, plane) {
+		hw_plane_wm = &hw_wm.planes[plane];
+		sw_plane_wm = &sw_wm->planes[plane];
 
-		if (skl_ddb_entry_equal(hw_entry, sw_entry))
-			continue;
+		/* Watermarks */
+		for (level = 0; level <= max_level; level++) {
+			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+						&sw_plane_wm->wm[level]))
+				continue;
 
-		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
-			  "(expected (%u,%u), found (%u,%u))\n",
-			  pipe_name(pipe), plane + 1,
-			  sw_entry->start, sw_entry->end,
-			  hw_entry->start, hw_entry->end);
+			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+				  pipe_name(pipe), plane + 1, level,
+				  sw_plane_wm->wm[level].plane_en,
+				  sw_plane_wm->wm[level].plane_res_b,
+				  sw_plane_wm->wm[level].plane_res_l,
+				  hw_plane_wm->wm[level].plane_en,
+				  hw_plane_wm->wm[level].plane_res_b,
+				  hw_plane_wm->wm[level].plane_res_l);
+		}
+
+		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+					 &sw_plane_wm->trans_wm)) {
+			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+				  pipe_name(pipe), plane + 1,
+				  sw_plane_wm->trans_wm.plane_en,
+				  sw_plane_wm->trans_wm.plane_res_b,
+				  sw_plane_wm->trans_wm.plane_res_l,
+				  hw_plane_wm->trans_wm.plane_en,
+				  hw_plane_wm->trans_wm.plane_res_b,
+				  hw_plane_wm->trans_wm.plane_res_l);
+		}
+
+		/* DDB */
+		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
+		sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+
+		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+				  pipe_name(pipe), plane + 1,
+				  sw_ddb_entry->start, sw_ddb_entry->end,
+				  hw_ddb_entry->start, hw_ddb_entry->end);
+		}
 	}
 
 	/*
@@ -13472,25 +13523,60 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	 * once the plane becomes visible, we can skip this check
 	 */
 	if (intel_crtc->cursor_addr) {
-		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
 
-		if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
-			DRM_ERROR("mismatch in DDB state pipe %c cursor "
-				  "(expected (%u,%u), found (%u,%u))\n",
+		/* Watermarks */
+		for (level = 0; level <= max_level; level++) {
+			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+						&sw_plane_wm->wm[level]))
+				continue;
+
+			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+				  pipe_name(pipe), level,
+				  sw_plane_wm->wm[level].plane_en,
+				  sw_plane_wm->wm[level].plane_res_b,
+				  sw_plane_wm->wm[level].plane_res_l,
+				  hw_plane_wm->wm[level].plane_en,
+				  hw_plane_wm->wm[level].plane_res_b,
+				  hw_plane_wm->wm[level].plane_res_l);
+		}
+
+		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+					 &sw_plane_wm->trans_wm)) {
+			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
 				  pipe_name(pipe),
-				  sw_entry->start, sw_entry->end,
-				  hw_entry->start, hw_entry->end);
+				  sw_plane_wm->trans_wm.plane_en,
+				  sw_plane_wm->trans_wm.plane_res_b,
+				  sw_plane_wm->trans_wm.plane_res_l,
+				  hw_plane_wm->trans_wm.plane_en,
+				  hw_plane_wm->trans_wm.plane_res_b,
+				  hw_plane_wm->trans_wm.plane_res_l);
+		}
+
+		/* DDB */
+		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+				  pipe_name(pipe),
+				  sw_ddb_entry->start, sw_ddb_entry->end,
+				  hw_ddb_entry->start, hw_ddb_entry->end);
 		}
 	}
 }
 
 static void
-verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
+verify_connector_state(struct drm_device *dev,
+		       struct drm_atomic_state *state,
+		       struct drm_crtc *crtc)
 {
 	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	int i;
 
-	drm_for_each_connector(connector, dev) {
+	for_each_connector_in_state(state, connector, old_conn_state, i) {
 		struct drm_encoder *encoder = connector->encoder;
 		struct drm_connector_state *state = connector->state;
 
@@ -13698,15 +13784,16 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
 
 static void
 intel_modeset_verify_crtc(struct drm_crtc *crtc,
-			 struct drm_crtc_state *old_state,
-			 struct drm_crtc_state *new_state)
+			  struct drm_atomic_state *state,
+			  struct drm_crtc_state *old_state,
+			  struct drm_crtc_state *new_state)
 {
 	if (!needs_modeset(new_state) &&
 	    !to_intel_crtc_state(new_state)->update_pipe)
 		return;
 
 	verify_wm_state(crtc, new_state);
-	verify_connector_state(crtc->dev, crtc);
+	verify_connector_state(crtc->dev, state, crtc);
 	verify_crtc_state(crtc, old_state, new_state);
 	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
 }
@@ -13722,16 +13809,17 @@ verify_disabled_dpll_state(struct drm_device *dev)
 }
 
 static void
-intel_modeset_verify_disabled(struct drm_device *dev)
+intel_modeset_verify_disabled(struct drm_device *dev,
+			      struct drm_atomic_state *state)
 {
 	verify_encoder_state(dev);
-	verify_connector_state(dev, NULL);
+	verify_connector_state(dev, state, NULL);
 	verify_disabled_dpll_state(dev);
 }
 
 static void update_scanline_offset(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	/*
 	 * The scanline counter increments at the leading edge of hsync.
@@ -13751,7 +13839,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 	 * there's an extra 1 line difference. So we need to add two instead of
 	 * one to the value.
 	 */
-	if (IS_GEN2(dev)) {
+	if (IS_GEN2(dev_priv)) {
 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 		int vtotal;
 
@@ -13760,7 +13848,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 			vtotal /= 2;
 
 		crtc->scanline_offset = vtotal - 1;
-	} else if (HAS_DDI(dev) &&
+	} else if (HAS_DDI(dev_priv) &&
 		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
 		crtc->scanline_offset = 2;
 	} else
@@ -14059,13 +14147,10 @@ static int intel_atomic_check(struct drm_device *dev,
 }
 
 static int intel_atomic_prepare_commit(struct drm_device *dev,
-				       struct drm_atomic_state *state,
-				       bool nonblock)
+				       struct drm_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_plane_state *plane_state;
 	struct drm_crtc_state *crtc_state;
-	struct drm_plane *plane;
 	struct drm_crtc *crtc;
 	int i, ret;
 
@@ -14088,28 +14173,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 	ret = drm_atomic_helper_prepare_planes(dev, state);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!ret && !nonblock) {
-		for_each_plane_in_state(state, plane, plane_state, i) {
-			struct intel_plane_state *intel_plane_state =
-				to_intel_plane_state(plane_state);
-
-			if (!intel_plane_state->wait_req)
-				continue;
-
-			ret = i915_wait_request(intel_plane_state->wait_req,
-						I915_WAIT_INTERRUPTIBLE,
-						NULL, NULL);
-			if (ret) {
-				/* Any hang should be swallowed by the wait */
-				WARN_ON(ret == -EIO);
-				mutex_lock(&dev->struct_mutex);
-				drm_atomic_helper_cleanup_planes(dev, state);
-				mutex_unlock(&dev->struct_mutex);
-				break;
-			}
-		}
-	}
-
 	return ret;
 }
 
@@ -14135,22 +14198,24 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
 		return;
 
 	for_each_pipe(dev_priv, pipe) {
-		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+								  pipe);
 
 		if (!((1 << pipe) & crtc_mask))
 			continue;
 
-		ret = drm_crtc_vblank_get(crtc);
+		ret = drm_crtc_vblank_get(&crtc->base);
 		if (WARN_ON(ret != 0)) {
 			crtc_mask &= ~(1 << pipe);
 			continue;
 		}
 
-		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
+		last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
 	}
 
 	for_each_pipe(dev_priv, pipe) {
-		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+								  pipe);
 		long lret;
 
 		if (!((1 << pipe) & crtc_mask))
@@ -14158,12 +14223,12 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
 
 		lret = wait_event_timeout(dev->vblank[pipe].queue,
 				last_vblank_count[pipe] !=
-					drm_crtc_vblank_count(crtc),
+					drm_crtc_vblank_count(&crtc->base),
 				msecs_to_jiffies(50));
 
 		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
 
-		drm_crtc_vblank_put(crtc);
+		drm_crtc_vblank_put(&crtc->base);
 	}
 }
 
@@ -14237,13 +14302,12 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
 static void skl_update_crtcs(struct drm_atomic_state *state,
 			     unsigned int *crtc_vblank_mask)
 {
-	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_crtc *crtc;
+	struct intel_crtc *intel_crtc;
 	struct drm_crtc_state *old_crtc_state;
-	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
-	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+	struct intel_crtc_state *cstate;
 	unsigned int updated = 0;
 	bool progress;
 	enum pipe pipe;
@@ -14261,12 +14325,14 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 		for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
 			bool vbl_wait = false;
 			unsigned int cmask = drm_crtc_mask(crtc);
-			pipe = to_intel_crtc(crtc)->pipe;
+
+			intel_crtc = to_intel_crtc(crtc);
+			cstate = to_intel_crtc_state(crtc->state);
+			pipe = intel_crtc->pipe;
 
 			if (updated & cmask || !crtc->state->active)
 				continue;
-			if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
-							pipe))
+			if (skl_ddb_allocation_overlaps(state, intel_crtc))
 				continue;
 
 			updated |= cmask;
@@ -14277,7 +14343,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 			 * then we need to wait for a vblank to pass for the
 			 * new ddb allocation to take effect.
 			 */
-			if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
+						 &intel_crtc->hw_ddb) &&
 			    !crtc->state->active_changed &&
 			    intel_state->wm_results.dirty_pipes != updated)
 				vbl_wait = true;
@@ -14286,7 +14353,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 					  crtc_vblank_mask);
 
 			if (vbl_wait)
-				intel_wait_for_vblank(dev, pipe);
+				intel_wait_for_vblank(dev_priv, pipe);
 
 			progress = true;
 		}
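
skl_ddb_allocation_overlaps() boils down to an interval test on per-pipe (start, end) DDB entries: pipes whose new allocation does not intersect anyone's current one can be flushed immediately, the rest must wait for a vblank so the freed blocks actually become available. A sketch of the interval test (field names mirror skl_ddb_entry; everything else is simplified):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ddb_entry { uint16_t start, end; };	/* end is exclusive */

static bool ddb_overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

int main(void)
{
	struct ddb_entry pipe_a_new = { 0, 256 };
	struct ddb_entry pipe_b_cur = { 256, 512 };	/* adjacent, disjoint */
	struct ddb_entry pipe_c_cur = { 200, 400 };	/* overlaps pipe A */

	printf("A vs B: %d\n", ddb_overlaps(&pipe_a_new, &pipe_b_cur)); /* 0 */
	printf("A vs C: %d\n", ddb_overlaps(&pipe_a_new, &pipe_c_cur)); /* 1 */
	return 0;
}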
@@ -14301,37 +14368,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	struct drm_crtc_state *old_crtc_state;
 	struct drm_crtc *crtc;
 	struct intel_crtc_state *intel_cstate;
-	struct drm_plane *plane;
-	struct drm_plane_state *plane_state;
 	bool hw_check = intel_state->modeset;
 	unsigned long put_domains[I915_MAX_PIPES] = {};
 	unsigned crtc_vblank_mask = 0;
-	int i, ret;
-
-	for_each_plane_in_state(state, plane, plane_state, i) {
-		struct intel_plane_state *intel_plane_state =
-			to_intel_plane_state(plane_state);
-
-		if (!intel_plane_state->wait_req)
-			continue;
-
-		ret = i915_wait_request(intel_plane_state->wait_req,
-					0, NULL, NULL);
-		/* EIO should be eaten, and we can't get interrupted in the
-		 * worker, and blocking commits have waited already. */
-		WARN_ON(ret);
-	}
+	int i;
 
 	drm_atomic_helper_wait_for_dependencies(state);
 
-	if (intel_state->modeset) {
-		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
-		       sizeof(intel_state->min_pixclk));
-		dev_priv->active_crtcs = intel_state->active_crtcs;
-		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
-
+	if (intel_state->modeset)
 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
-	}
 
 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -14365,7 +14410,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 			intel_check_pch_fifo_underruns(dev_priv);
 
 			if (!crtc->state->active)
-				intel_update_watermarks(crtc);
+				intel_update_watermarks(intel_crtc);
 		}
 	}
 
@@ -14388,7 +14433,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		if (!intel_can_enable_sagv(state))
 			intel_disable_sagv(dev_priv);
 
-		intel_modeset_verify_disabled(dev);
+		intel_modeset_verify_disabled(dev, state);
 	}
 
 	/* Complete the events for pipes that have now been disabled */
@@ -14440,7 +14485,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		if (put_domains[i])
 			modeset_put_power_domains(dev_priv, put_domains[i]);
 
-		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
+		intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
 	}
 
 	if (intel_state->modeset && intel_can_enable_sagv(state))
@@ -14457,7 +14502,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_cleanup_done(state);
 
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 
 	/* As one of the primary mmio accessors, KMS has a high likelihood
 	 * of triggering bugs in unclaimed access. After we finish
@@ -14475,12 +14520,33 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
 static void intel_atomic_commit_work(struct work_struct *work)
 {
-	struct drm_atomic_state *state = container_of(work,
-						      struct drm_atomic_state,
-						      commit_work);
+	struct drm_atomic_state *state =
+		container_of(work, struct drm_atomic_state, commit_work);
+
 	intel_atomic_commit_tail(state);
 }
 
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+			  enum i915_sw_fence_notify notify)
+{
+	struct intel_atomic_state *state =
+		container_of(fence, struct intel_atomic_state, commit_ready);
+
+	switch (notify) {
+	case FENCE_COMPLETE:
+		if (state->base.commit_work.func)
+			queue_work(system_unbound_wq, &state->base.commit_work);
+		break;
+
+	case FENCE_FREE:
+		drm_atomic_state_put(&state->base);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
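+
The commit-ready fence delivers two distinct notifications: FENCE_COMPLETE once every awaited fence (old-framebuffer rendering, explicit in-fences) has signalled, and FENCE_FREE when the last reference to the fence itself goes away. A reduced sketch of that dispatch, with the real queue_work()/drm_atomic_state_put() calls replaced by hypothetical prints:

#include <stdio.h>

enum notify { FENCE_COMPLETE, FENCE_FREE };

/* stand-ins for queue_work() / drm_atomic_state_put() in the real handler */
static void run_or_queue_commit(void) { printf("commit tail runnable\n"); }
static void drop_state_ref(void)      { printf("state reference dropped\n"); }

static int commit_ready(enum notify n)
{
	switch (n) {
	case FENCE_COMPLETE:	/* all dependencies signalled */
		run_or_queue_commit();
		break;
	case FENCE_FREE:	/* fence torn down, release the state */
		drop_state_ref();
		break;
	}
	return 0;	/* NOTIFY_DONE */
}

int main(void)
{
	commit_ready(FENCE_COMPLETE);
	commit_ready(FENCE_FREE);
	return 0;
}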
 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
 {
 	struct drm_plane_state *old_plane_state;
@@ -14526,11 +14592,14 @@ static int intel_atomic_commit(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+	drm_atomic_state_get(state);
+	i915_sw_fence_init(&intel_state->commit_ready,
+			   intel_atomic_commit_ready);
 
-	ret = intel_atomic_prepare_commit(dev, state, nonblock);
+	ret = intel_atomic_prepare_commit(dev, state);
 	if (ret) {
 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+		i915_sw_fence_commit(&intel_state->commit_ready);
 		return ret;
 	}
 
@@ -14540,10 +14609,22 @@ static int intel_atomic_commit(struct drm_device *dev,
 	intel_shared_dpll_commit(state);
 	intel_atomic_track_fbs(state);
 
-	if (nonblock)
-		queue_work(system_unbound_wq, &state->commit_work);
-	else
+	if (intel_state->modeset) {
+		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+		       sizeof(intel_state->min_pixclk));
+		dev_priv->active_crtcs = intel_state->active_crtcs;
+		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+	}
+
+	drm_atomic_state_get(state);
+	INIT_WORK(&state->commit_work,
+		  nonblock ? intel_atomic_commit_work : NULL);
+
+	i915_sw_fence_commit(&intel_state->commit_ready);
+	if (!nonblock) {
+		i915_sw_fence_wait(&intel_state->commit_ready);
 		intel_atomic_commit_tail(state);
+	}
 
 	return 0;
 }
@@ -14581,9 +14662,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
 		goto retry;
 	}
 
-	if (ret)
 out:
-		drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 }
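
With the switch to reference-counted atomic state, drm_atomic_state_put() replaces drm_atomic_state_free() and, unlike the old conditional free above, is safe to call on every exit path, because a successful commit takes its own reference. A hedged sketch of the resulting caller pattern (hypothetical caller, real drm_atomic_* API):

	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
	int ret;

	if (!state)
		return -ENOMEM;		/* on success, refcount starts at 1 */

	/* ... build up crtc/plane/connector state ... */

	ret = drm_atomic_commit(state);	/* commit grabs its own reference */
	drm_atomic_state_put(state);	/* caller always drops its own */
	return ret;
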
 
 /*
@@ -14656,19 +14736,22 @@ int
 intel_prepare_plane_fb(struct drm_plane *plane,
 		       struct drm_plane_state *new_state)
 {
+	struct intel_atomic_state *intel_state =
+		to_intel_atomic_state(new_state->state);
 	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_framebuffer *fb = new_state->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
-	struct reservation_object *resv;
-	int ret = 0;
+	int ret;
 
 	if (!obj && !old_obj)
 		return 0;
 
 	if (old_obj) {
 		struct drm_crtc_state *crtc_state =
-			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
+			drm_atomic_get_existing_crtc_state(new_state->state,
+							   plane->state->crtc);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14681,52 +14764,56 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		 * This should only fail upon a hung GPU, in which case we
 		 * can safely continue.
 		 */
-		if (needs_modeset(crtc_state))
-			ret = i915_gem_object_wait_rendering(old_obj, true);
-		if (ret) {
-			/* GPU hangs should have been swallowed by the wait */
-			WARN_ON(ret == -EIO);
-			return ret;
+		if (needs_modeset(crtc_state)) {
+			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+							      old_obj->resv, NULL,
+							      false, 0,
+							      GFP_KERNEL);
+			if (ret < 0)
+				return ret;
 		}
 	}
 
+	if (new_state->fence) { /* explicit fencing */
+		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+						    new_state->fence,
+						    I915_FENCE_TIMEOUT,
+						    GFP_KERNEL);
+		if (ret < 0)
+			return ret;
+	}
+
 	if (!obj)
 		return 0;
 
-	/* For framebuffer backed by dmabuf, wait for fence */
-	resv = i915_gem_object_get_dmabuf_resv(obj);
-	if (resv) {
-		long lret;
-
-		lret = reservation_object_wait_timeout_rcu(resv, false, true,
-							   MAX_SCHEDULE_TIMEOUT);
-		if (lret == -ERESTARTSYS)
-			return lret;
-
-		WARN(lret < 0, "waiting returns %li\n", lret);
+	if (!new_state->fence) { /* implicit fencing */
+		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+						      obj->resv, NULL,
+						      false, I915_FENCE_TIMEOUT,
+						      GFP_KERNEL);
+		if (ret < 0)
+			return ret;
 	}
 
 	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
 	    INTEL_INFO(dev)->cursor_needs_physical) {
-		int align = IS_I830(dev) ? 16 * 1024 : 256;
+		int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
 		ret = i915_gem_object_attach_phys(obj, align);
-		if (ret)
+		if (ret) {
 			DRM_DEBUG_KMS("failed to attach phys object\n");
+			return ret;
+		}
 	} else {
 		struct i915_vma *vma;
 
 		vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
-		if (IS_ERR(vma))
-			ret = PTR_ERR(vma);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG_KMS("failed to pin object\n");
+			return PTR_ERR(vma);
+		}
 	}
 
-	if (ret == 0) {
-		to_intel_plane_state(new_state)->wait_req =
-			i915_gem_active_get(&obj->last_write,
-					    &obj->base.dev->struct_mutex);
-	}
-
-	return ret;
+	return 0;
 }
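
The rewritten prepare hook encodes the explicit-versus-implicit fencing rule: if userspace attached a fence to the plane state, only that fence is awaited; otherwise the commit waits on the fences already stored in the object's reservation object. A sketch of that decision pulled into a hypothetical helper (the i915_sw_fence calls and arguments are the ones used above):

	static int wait_for_plane_fences(struct intel_atomic_state *state,
					 struct drm_plane_state *new_state,
					 struct drm_i915_gem_object *obj)
	{
		if (new_state->fence)	/* explicit fence from userspace */
			return i915_sw_fence_await_dma_fence(&state->commit_ready,
							     new_state->fence,
							     I915_FENCE_TIMEOUT,
							     GFP_KERNEL);

		/* implicit fencing: wait on the object's reservation object */
		return i915_sw_fence_await_reservation(&state->commit_ready,
						       obj->resv, NULL, false,
						       I915_FENCE_TIMEOUT,
						       GFP_KERNEL);
	}
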
 
 /**
@@ -14744,7 +14831,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane_state *old_intel_state;
-	struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
 	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
 
@@ -14756,9 +14842,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
 	    !INTEL_INFO(dev)->cursor_needs_physical))
 		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
-
-	i915_gem_request_assign(&intel_state->wait_req, NULL);
-	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
 }
 
 int
@@ -14833,6 +14916,8 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *intel_cstate =
+		to_intel_crtc_state(crtc->state);
 	struct intel_crtc_state *old_intel_state =
 		to_intel_crtc_state(old_crtc_state);
 	bool modeset = needs_modeset(crtc->state);
@@ -14849,13 +14934,13 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 		intel_color_load_luts(crtc->state);
 	}
 
-	if (to_intel_crtc_state(crtc->state)->update_pipe)
+	if (intel_cstate->update_pipe) {
 		intel_update_pipe_config(intel_crtc, old_intel_state);
-	else if (INTEL_GEN(dev_priv) >= 9) {
+	} else if (INTEL_GEN(dev_priv) >= 9) {
 		skl_detach_scalers(intel_crtc);
 
 		I915_WRITE(PIPE_WM_LINETIME(pipe),
-			   dev_priv->wm.skl_hw.wm_linetime[pipe]);
+			   intel_cstate->wm.skl.optimal.linetime);
 	}
 }
 
@@ -14876,9 +14961,6 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
  */
 void intel_plane_destroy(struct drm_plane *plane)
 {
-	if (!plane)
-		return;
-
 	drm_plane_cleanup(plane);
 	kfree(to_intel_plane(plane));
 }
@@ -14892,30 +14974,35 @@ const struct drm_plane_funcs intel_plane_funcs = {
 	.atomic_set_property = intel_plane_atomic_set_property,
 	.atomic_duplicate_state = intel_plane_duplicate_state,
 	.atomic_destroy_state = intel_plane_destroy_state,
-
 };
 
-static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
-						    int pipe)
+static struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
 	struct intel_plane *primary = NULL;
 	struct intel_plane_state *state = NULL;
 	const uint32_t *intel_primary_formats;
+	unsigned int supported_rotations;
 	unsigned int num_formats;
 	int ret;
 
 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-	if (!primary)
+	if (!primary) {
+		ret = -ENOMEM;
 		goto fail;
+	}
 
 	state = intel_create_plane_state(&primary->base);
-	if (!state)
+	if (!state) {
+		ret = -ENOMEM;
 		goto fail;
+	}
+
 	primary->base.state = &state->base;
 
 	primary->can_scale = false;
 	primary->max_downscale = 1;
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		primary->can_scale = true;
 		state->scaler_id = -1;
 	}
@@ -14923,22 +15010,22 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
 	primary->plane = pipe;
 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
 	primary->check_plane = intel_check_primary_plane;
-	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
 		primary->plane = !pipe;
 
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		intel_primary_formats = skl_primary_formats;
 		num_formats = ARRAY_SIZE(skl_primary_formats);
 
 		primary->update_plane = skylake_update_primary_plane;
 		primary->disable_plane = skylake_disable_primary_plane;
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		intel_primary_formats = i965_primary_formats;
 		num_formats = ARRAY_SIZE(i965_primary_formats);
 
 		primary->update_plane = ironlake_update_primary_plane;
 		primary->disable_plane = i9xx_disable_primary_plane;
-	} else if (INTEL_INFO(dev)->gen >= 4) {
+	} else if (INTEL_GEN(dev_priv) >= 4) {
 		intel_primary_formats = i965_primary_formats;
 		num_formats = ARRAY_SIZE(i965_primary_formats);
 
@@ -14952,57 +15039,52 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
 		primary->disable_plane = i9xx_disable_primary_plane;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 9)
-		ret = drm_universal_plane_init(dev, &primary->base, 0,
-					       &intel_plane_funcs,
+	if (INTEL_GEN(dev_priv) >= 9)
+		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+					       0, &intel_plane_funcs,
 					       intel_primary_formats, num_formats,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "plane 1%c", pipe_name(pipe));
-	else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
-		ret = drm_universal_plane_init(dev, &primary->base, 0,
-					       &intel_plane_funcs,
+	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+					       0, &intel_plane_funcs,
 					       intel_primary_formats, num_formats,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "primary %c", pipe_name(pipe));
 	else
-		ret = drm_universal_plane_init(dev, &primary->base, 0,
-					       &intel_plane_funcs,
+		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+					       0, &intel_plane_funcs,
 					       intel_primary_formats, num_formats,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "plane %c", plane_name(primary->plane));
 	if (ret)
 		goto fail;
 
-	if (INTEL_INFO(dev)->gen >= 4)
-		intel_create_rotation_property(dev, primary);
+	if (INTEL_GEN(dev_priv) >= 9) {
+		supported_rotations =
+			DRM_ROTATE_0 | DRM_ROTATE_90 |
+			DRM_ROTATE_180 | DRM_ROTATE_270;
+	} else if (INTEL_GEN(dev_priv) >= 4) {
+		supported_rotations =
+			DRM_ROTATE_0 | DRM_ROTATE_180;
+	} else {
+		supported_rotations = DRM_ROTATE_0;
+	}
+
+	if (INTEL_GEN(dev_priv) >= 4)
+		drm_plane_create_rotation_property(&primary->base,
+						   DRM_ROTATE_0,
+						   supported_rotations);
 
 	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
 
-	return &primary->base;
+	return primary;
 
 fail:
 	kfree(state);
 	kfree(primary);
 
-	return NULL;
-}
-
-void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
-{
-	if (!dev->mode_config.rotation_property) {
-		unsigned long flags = DRM_ROTATE_0 |
-			DRM_ROTATE_180;
-
-		if (INTEL_INFO(dev)->gen >= 9)
-			flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
-
-		dev->mode_config.rotation_property =
-			drm_mode_create_rotation_property(dev, flags);
-	}
-	if (dev->mode_config.rotation_property)
-		drm_object_attach_property(&plane->base.base,
-				dev->mode_config.rotation_property,
-				plane->base.state->rotation);
+	return ERR_PTR(ret);
 }
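
Returning ERR_PTR(ret) rather than NULL lets intel_crtc_init() below distinguish why plane creation failed and propagate the exact error code. A toy userspace rendering of the idiom (illustration only, not the kernel's err.h):

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *create_plane(int fail)
	{
		return fail ? ERR_PTR(-ENOMEM) : (void *)"plane";
	}

	int main(void)
	{
		void *plane = create_plane(1);

		if (IS_ERR(plane))	/* the error code travels in the pointer */
			printf("create_plane failed: %ld\n", PTR_ERR(plane));
		return 0;
	}
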
 
 static int
@@ -15029,7 +15111,8 @@ intel_check_cursor_plane(struct drm_plane *plane,
 		return 0;
 
 	/* Check for which cursor types we support */
-	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
+	if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
+			    state->base.crtc_h)) {
 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
 			  state->base.crtc_w, state->base.crtc_h);
 		return -EINVAL;
@@ -15056,7 +15139,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
 	 * display power well must be turned off and on again.
 	 * Refuse to put the cursor into that compromised position.
 	 */
-	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+	if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
 	    state->base.visible && state->base.crtc_x < 0) {
 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
 		return -EINVAL;
@@ -15097,20 +15180,25 @@ intel_update_cursor_plane(struct drm_plane *plane,
 	intel_crtc_update_cursor(crtc, state);
 }
 
-static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
-						   int pipe)
+static struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
 	struct intel_plane *cursor = NULL;
 	struct intel_plane_state *state = NULL;
 	int ret;
 
 	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
-	if (!cursor)
+	if (!cursor) {
+		ret = -ENOMEM;
 		goto fail;
+	}
 
 	state = intel_create_plane_state(&cursor->base);
-	if (!state)
+	if (!state) {
+		ret = -ENOMEM;
 		goto fail;
+	}
+
 	cursor->base.state = &state->base;
 
 	cursor->can_scale = false;
@@ -15122,8 +15210,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
 	cursor->update_plane = intel_update_cursor_plane;
 	cursor->disable_plane = intel_disable_cursor_plane;
 
-	ret = drm_universal_plane_init(dev, &cursor->base, 0,
-				       &intel_plane_funcs,
+	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+				       0, &intel_plane_funcs,
 				       intel_cursor_formats,
 				       ARRAY_SIZE(intel_cursor_formats),
 				       DRM_PLANE_TYPE_CURSOR,
@@ -15131,88 +15219,100 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	if (INTEL_INFO(dev)->gen >= 4) {
-		if (!dev->mode_config.rotation_property)
-			dev->mode_config.rotation_property =
-				drm_mode_create_rotation_property(dev,
-							DRM_ROTATE_0 |
-							DRM_ROTATE_180);
-		if (dev->mode_config.rotation_property)
-			drm_object_attach_property(&cursor->base.base,
-				dev->mode_config.rotation_property,
-				state->base.rotation);
-	}
+	if (INTEL_GEN(dev_priv) >= 4)
+		drm_plane_create_rotation_property(&cursor->base,
+						   DRM_ROTATE_0,
+						   DRM_ROTATE_0 |
+						   DRM_ROTATE_180);
 
-	if (INTEL_INFO(dev)->gen >=9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		state->scaler_id = -1;
 
 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
 
-	return &cursor->base;
+	return cursor;
 
 fail:
 	kfree(state);
 	kfree(cursor);
 
-	return NULL;
+	return ERR_PTR(ret);
 }
 
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
-	struct intel_crtc_state *crtc_state)
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+			     struct intel_crtc *crtc,
+			     struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
 	int i;
-	struct intel_scaler *intel_scaler;
-	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
 
-	for (i = 0; i < intel_crtc->num_scalers; i++) {
-		intel_scaler = &scaler_state->scalers[i];
-		intel_scaler->in_use = 0;
-		intel_scaler->mode = PS_SCALER_MODE_DYN;
+	for (i = 0; i < crtc->num_scalers; i++) {
+		struct intel_scaler *scaler = &scaler_state->scalers[i];
+
+		scaler->in_use = 0;
+		scaler->mode = PS_SCALER_MODE_DYN;
 	}
 
 	scaler_state->scaler_id = -1;
 }
 
-static void intel_crtc_init(struct drm_device *dev, int pipe)
+static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc;
 	struct intel_crtc_state *crtc_state = NULL;
-	struct drm_plane *primary = NULL;
-	struct drm_plane *cursor = NULL;
-	int ret;
+	struct intel_plane *primary = NULL;
+	struct intel_plane *cursor = NULL;
+	int sprite, ret;
 
 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
-	if (intel_crtc == NULL)
-		return;
+	if (!intel_crtc)
+		return -ENOMEM;
 
 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-	if (!crtc_state)
+	if (!crtc_state) {
+		ret = -ENOMEM;
 		goto fail;
+	}
 	intel_crtc->config = crtc_state;
 	intel_crtc->base.state = &crtc_state->base;
 	crtc_state->base.crtc = &intel_crtc->base;
 
 	/* initialize shared scalers */
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		if (pipe == PIPE_C)
 			intel_crtc->num_scalers = 1;
 		else
 			intel_crtc->num_scalers = SKL_NUM_SCALERS;
 
-		skl_init_scalers(dev, intel_crtc, crtc_state);
+		skl_init_scalers(dev_priv, intel_crtc, crtc_state);
 	}
 
-	primary = intel_primary_plane_create(dev, pipe);
-	if (!primary)
+	primary = intel_primary_plane_create(dev_priv, pipe);
+	if (IS_ERR(primary)) {
+		ret = PTR_ERR(primary);
 		goto fail;
+	}
 
-	cursor = intel_cursor_plane_create(dev, pipe);
-	if (!cursor)
+	for_each_sprite(dev_priv, pipe, sprite) {
+		struct intel_plane *plane;
+
+		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+		if (IS_ERR(plane)) {
+			ret = PTR_ERR(plane);
+			goto fail;
+		}
+	}
+
+	cursor = intel_cursor_plane_create(dev_priv, pipe);
+	if (IS_ERR(cursor)) {
+		ret = PTR_ERR(cursor);
 		goto fail;
+	}
 
-	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
-					cursor, &intel_crtc_funcs,
+	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+					&primary->base, &cursor->base,
+					&intel_crtc_funcs,
 					"pipe %c", pipe_name(pipe));
 	if (ret)
 		goto fail;
@@ -15222,8 +15322,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
 	 */
 	intel_crtc->pipe = pipe;
-	intel_crtc->plane = pipe;
-	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
+	intel_crtc->plane = (enum plane) pipe;
+	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) {
 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
 		intel_crtc->plane = !pipe;
 	}
@@ -15236,21 +15336,26 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 
 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
-	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
-	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
+	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
 
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
 	intel_color_init(&intel_crtc->base);
 
 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
-	return;
+
+	return 0;
 
 fail:
-	intel_plane_destroy(primary);
-	intel_plane_destroy(cursor);
+	/*
+	 * drm_mode_config_cleanup() will free up any
+	 * crtcs/planes already initialized.
+	 */
 	kfree(crtc_state);
 	kfree(intel_crtc);
+
+	return ret;
 }
 
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -15300,17 +15405,15 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
 	return index_mask;
 }
 
-static bool has_edp_a(struct drm_device *dev)
+static bool has_edp_a(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	if (!IS_MOBILE(dev))
+	if (!IS_MOBILE(dev_priv))
 		return false;
 
 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
 		return false;
 
-	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+	if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
 		return false;
 
 	return true;
@@ -15323,17 +15426,18 @@ static bool intel_crt_present(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 9)
 		return false;
 
-	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
+	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
 		return false;
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		return false;
 
-	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+	if (HAS_PCH_LPT_H(dev_priv) &&
+	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
 		return false;
 
 	/* DDI E can't be used if DDI A requires 4 lanes */
-	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+	if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
 		return false;
 
 	if (!dev_priv->vbt.int_crt_support)
@@ -15396,7 +15500,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 	if (intel_crt_present(dev))
 		intel_crt_init(dev);
 
-	if (IS_BROXTON(dev)) {
+	if (IS_BROXTON(dev_priv)) {
 		/*
 		 * FIXME: Broxton doesn't support port detection via the
 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -15407,7 +15511,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 		intel_ddi_init(dev, PORT_C);
 
 		intel_dsi_init(dev);
-	} else if (HAS_DDI(dev)) {
+	} else if (HAS_DDI(dev_priv)) {
 		int found;
 
 		/*
@@ -15417,7 +15521,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 		 */
 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
 		/* WaIgnoreDDIAStrap: skl */
-		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+		if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 			intel_ddi_init(dev, PORT_A);
 
 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -15433,17 +15537,17 @@ static void intel_setup_outputs(struct drm_device *dev)
 		/*
 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
 		 */
-		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
 		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
 			intel_ddi_init(dev, PORT_E);
 
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		int found;
 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
 
-		if (has_edp_a(dev))
+		if (has_edp_a(dev_priv))
 			intel_dp_init(dev, DP_A, PORT_A);
 
 		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
@@ -15466,7 +15570,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 
 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		bool has_edp, has_port;
 
 		/*
@@ -15498,7 +15602,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
 			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
 
-		if (IS_CHERRYVIEW(dev)) {
+		if (IS_CHERRYVIEW(dev_priv)) {
 			/*
 			 * eDP not supported on port D,
 			 * so no need to worry about it
@@ -15511,18 +15615,18 @@ static void intel_setup_outputs(struct drm_device *dev)
 		}
 
 		intel_dsi_init(dev);
-	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
+	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
 		bool found = false;
 
 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
 			DRM_DEBUG_KMS("probing SDVOB\n");
 			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
-			if (!found && IS_G4X(dev)) {
+			if (!found && IS_G4X(dev_priv)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
 			}
 
-			if (!found && IS_G4X(dev))
+			if (!found && IS_G4X(dev_priv))
 				intel_dp_init(dev, DP_B, PORT_B);
 		}
 
@@ -15535,18 +15639,17 @@ static void intel_setup_outputs(struct drm_device *dev)
 
 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
 
-			if (IS_G4X(dev)) {
+			if (IS_G4X(dev_priv)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
 			}
-			if (IS_G4X(dev))
+			if (IS_G4X(dev_priv))
 				intel_dp_init(dev, DP_C, PORT_C);
 		}
 
-		if (IS_G4X(dev) &&
-		    (I915_READ(DP_D) & DP_DETECTED))
+		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
 			intel_dp_init(dev, DP_D, PORT_D);
-	} else if (IS_GEN2(dev))
+	} else if (IS_GEN2(dev_priv))
 		intel_dvo_init(dev);
 
 	if (SUPPORTS_TV(dev))
@@ -15617,10 +15720,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 };
 
 static
-u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
-			 uint32_t pixel_format)
+u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
+			 uint64_t fb_modifier, uint32_t pixel_format)
 {
-	u32 gen = INTEL_INFO(dev)->gen;
+	u32 gen = INTEL_INFO(dev_priv)->gen;
 
 	if (gen >= 9) {
 		int cpp = drm_format_plane_cpp(pixel_format, 0);
@@ -15629,7 +15732,8 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
 		 *  pixels and 32K bytes."
 		 */
 		return min(8192 * cpp, 32768);
-	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+	} else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
+		   !IS_CHERRYVIEW(dev_priv)) {
 		return 32*1024;
 	} else if (gen >= 4) {
 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
@@ -15716,7 +15820,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
 					   mode_cmd->pixel_format);
 	if (mode_cmd->pitches[0] > pitch_limit) {
 		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
@@ -15754,7 +15858,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
 		}
 		break;
 	case DRM_FORMAT_ABGR8888:
-		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
 		    INTEL_INFO(dev)->gen < 9) {
 			format_name = drm_get_format_name(mode_cmd->pixel_format);
 			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
@@ -15773,7 +15877,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
 		}
 		break;
 	case DRM_FORMAT_ABGR2101010:
-		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
 			format_name = drm_get_format_name(mode_cmd->pixel_format);
 			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
 			kfree(format_name);
@@ -15835,17 +15939,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
 
 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
 	if (IS_ERR(fb))
-		i915_gem_object_put_unlocked(obj);
+		i915_gem_object_put(obj);
 
 	return fb;
 }
 
-#ifndef CONFIG_DRM_FBDEV_EMULATION
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-#endif
-
 static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.fb_create = intel_user_framebuffer_create,
 	.output_poll_changed = intel_fbdev_output_poll_changed,
@@ -16226,7 +16324,7 @@ static void i915_disable_vga(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u8 sr1;
-	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
 
 	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
 	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
@@ -16244,11 +16342,11 @@ void intel_modeset_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	intel_update_cdclk(dev);
+	intel_update_cdclk(dev_priv);
 
 	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
 
-	intel_init_clock_gating(dev);
+	intel_init_clock_gating(dev_priv);
 }
 
 /*
@@ -16314,7 +16412,7 @@ static void sanitize_watermarks(struct drm_device *dev)
 		 * BIOS-programmed watermarks untouched and hope for the best.
 		 */
 		WARN(true, "Could not determine valid watermarks for inherited state\n");
-		goto fail;
+		goto put_state;
 	}
 
 	/* Write calculated watermark values back */
@@ -16325,17 +16423,17 @@ static void sanitize_watermarks(struct drm_device *dev)
 		dev_priv->display.optimize_watermarks(cs);
 	}
 
-	drm_atomic_state_free(state);
+put_state:
+	drm_atomic_state_put(state);
 fail:
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 }
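
The put_state/fail labels follow the standard kernel unwind shape: each early exit jumps to the first label whose resource was actually acquired, and resources are released in reverse order of acquisition, with the success path falling through the same labels. A self-contained sketch with hypothetical resources:

	#include <stdlib.h>

	static int do_setup(void)
	{
		int ret = 0;
		char *a, *b;

		a = malloc(16);
		if (!a)
			return -1;

		b = malloc(16);
		if (!b) {
			ret = -1;
			goto free_a;	/* 'b' was never acquired */
		}

		/* ... use a and b ... */

		free(b);		/* success falls through, reverse order */
	free_a:
		free(a);
		return ret;
	}

	int main(void) { return do_setup(); }
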
 
-void intel_modeset_init(struct drm_device *dev)
+int intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	int sprite, ret;
 	enum pipe pipe;
 	struct intel_crtc *crtc;
 
@@ -16353,10 +16451,10 @@ void intel_modeset_init(struct drm_device *dev)
 
 	intel_init_quirks(dev);
 
-	intel_init_pm(dev);
+	intel_init_pm(dev_priv);
 
 	if (INTEL_INFO(dev)->num_pipes == 0)
-		return;
+		return 0;
 
 	/*
 	 * There may be no VBT; and if the BIOS enabled SSC we can
@@ -16364,7 +16462,7 @@ void intel_modeset_init(struct drm_device *dev)
 	 * BIOS isn't using it, don't assume it will work even if the VBT
 	 * indicates as much.
 	 */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
 		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
 					    DREF_SSC1_ENABLE);
 
@@ -16376,10 +16474,10 @@ void intel_modeset_init(struct drm_device *dev)
 		}
 	}
 
-	if (IS_GEN2(dev)) {
+	if (IS_GEN2(dev_priv)) {
 		dev->mode_config.max_width = 2048;
 		dev->mode_config.max_height = 2048;
-	} else if (IS_GEN3(dev)) {
+	} else if (IS_GEN3(dev_priv)) {
 		dev->mode_config.max_width = 4096;
 		dev->mode_config.max_height = 4096;
 	} else {
@@ -16387,10 +16485,10 @@ void intel_modeset_init(struct drm_device *dev)
 		dev->mode_config.max_height = 8192;
 	}
 
-	if (IS_845G(dev) || IS_I865G(dev)) {
-		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+	if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
+		dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
 		dev->mode_config.cursor_height = 1023;
-	} else if (IS_GEN2(dev)) {
+	} else if (IS_GEN2(dev_priv)) {
 		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
 		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
 	} else {
@@ -16405,22 +16503,22 @@ void intel_modeset_init(struct drm_device *dev)
 		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
 	for_each_pipe(dev_priv, pipe) {
-		intel_crtc_init(dev, pipe);
-		for_each_sprite(dev_priv, pipe, sprite) {
-			ret = intel_plane_init(dev, pipe, sprite);
-			if (ret)
-				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
-					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
+		int ret;
+
+		ret = intel_crtc_init(dev_priv, pipe);
+		if (ret) {
+			drm_mode_config_cleanup(dev);
+			return ret;
 		}
 	}
 
 	intel_update_czclk(dev_priv);
-	intel_update_cdclk(dev);
+	intel_update_cdclk(dev_priv);
 
 	intel_shared_dpll_init(dev);
 
 	if (dev_priv->max_cdclk_freq == 0)
-		intel_update_max_cdclk(dev);
+		intel_update_max_cdclk(dev_priv);
 
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
@@ -16459,6 +16557,8 @@ void intel_modeset_init(struct drm_device *dev)
 	 * since the watermark calculation done here will use pstate->fb.
 	 */
 	sanitize_watermarks(dev);
+
+	return 0;
 }
 
 static void intel_enable_pipe_a(struct drm_device *dev)
@@ -16596,7 +16696,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 	if (crtc->active && !intel_crtc_has_encoders(crtc))
 		intel_crtc_disable_noatomic(&crtc->base);
 
-	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
+	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
 		/*
 		 * We start out with underrun reporting disabled to avoid races.
 		 * For correct bookkeeping mark this on active crtcs.
@@ -16671,7 +16771,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 void i915_redisable_vga_power_on(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
 
 	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
@@ -16788,7 +16888,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		pipe = 0;
 
 		if (encoder->get_hw_state(encoder, &pipe)) {
-			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
 			encoder->base.crtc = &crtc->base;
 			crtc->config->output_types |= 1 << encoder->type;
 			encoder->get_config(encoder, crtc->config);
@@ -16889,7 +16990,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
 	}
 
 	for_each_pipe(dev_priv, pipe) {
-		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
 		intel_sanitize_crtc(crtc);
 		intel_dump_pipe_config(crtc, crtc->config,
 				       "[setup_hw_state]");
@@ -16909,11 +17011,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
 		pll->on = false;
 	}
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_wm_get_hw_state(dev);
-	else if (IS_GEN9(dev))
+	else if (IS_GEN9(dev_priv))
 		skl_wm_get_hw_state(dev);
-	else if (HAS_PCH_SPLIT(dev))
+	else if (HAS_PCH_SPLIT(dev_priv))
 		ilk_wm_get_hw_state(dev);
 
 	for_each_intel_crtc(dev, crtc) {
@@ -16963,10 +17065,9 @@ void intel_display_resume(struct drm_device *dev)
 	drm_modeset_acquire_fini(&ctx);
 	mutex_unlock(&dev->mode_config.mutex);
 
-	if (ret) {
+	if (ret)
 		DRM_ERROR("Restoring old state failed with %i\n", ret);
-		drm_atomic_state_free(state);
-	}
+	drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -17105,6 +17206,8 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
 struct intel_display_error_state {
 
 	u32 power_well_driver;
@@ -17243,7 +17346,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 		return;
 
 	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
 			   error->power_well_driver);
 	for_each_pipe(dev_priv, i) {
@@ -17260,7 +17363,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
 			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
 		}
-		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
 			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
 		if (INTEL_INFO(dev)->gen >= 4) {
 			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
@@ -17287,3 +17390,5 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
 	}
 }
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 14a3cf0..117a714 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -213,6 +213,81 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
 	return max_dotclk;
 }
 
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+	if (intel_dp->num_sink_rates) {
+		*sink_rates = intel_dp->sink_rates;
+		return intel_dp->num_sink_rates;
+	}
+
+	*sink_rates = default_rates;
+
+	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
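
The (max_link_bw >> 3) + 1 expression works because the standard DPCD link-rate codes express the rate in 0.27 Gbps units: 0x06 for 1.62 GHz, 0x0a for 2.7 GHz and 0x14 for 5.4 GHz. Shifting right by three maps those to 0, 1 and 2, so the result is how many leading entries of default_rates apply. A quick standalone check (default_rates values as used by i915, to the best of my reading):

	#include <stdio.h>

	#define DP_LINK_BW_1_62	0x06
	#define DP_LINK_BW_2_7	0x0a
	#define DP_LINK_BW_5_4	0x14

	static const int default_rates[] = { 162000, 270000, 540000 }; /* kHz */

	int main(void)
	{
		const int codes[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };

		for (int i = 0; i < 3; i++) {
			int len = (codes[i] >> 3) + 1; /* 0x06->1, 0x0a->2, 0x14->3 */
			printf("bw code 0x%02x -> %d usable rates, top %d kHz\n",
			       codes[i], len, default_rates[len - 1]);
		}
		return 0;
	}
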
+
+static int
+intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	int size;
+
+	if (IS_BROXTON(dev_priv)) {
+		*source_rates = bxt_rates;
+		size = ARRAY_SIZE(bxt_rates);
+	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		*source_rates = skl_rates;
+		size = ARRAY_SIZE(skl_rates);
+	} else {
+		*source_rates = default_rates;
+		size = ARRAY_SIZE(default_rates);
+	}
+
+	/* This depends on the fact that 5.4 is the last value in the array */
+	if (!intel_dp_source_supports_hbr2(intel_dp))
+		size--;
+
+	return size;
+}
+
+static int intersect_rates(const int *source_rates, int source_len,
+			   const int *sink_rates, int sink_len,
+			   int *common_rates)
+{
+	int i = 0, j = 0, k = 0;
+
+	while (i < source_len && j < sink_len) {
+		if (source_rates[i] == sink_rates[j]) {
+			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+				return k;
+			common_rates[k] = source_rates[i];
+			++k;
+			++i;
+			++j;
+		} else if (source_rates[i] < sink_rates[j]) {
+			++i;
+		} else {
+			++j;
+		}
+	}
+	return k;
+}
+
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+				 int *common_rates)
+{
+	const int *source_rates, *sink_rates;
+	int source_len, sink_len;
+
+	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+	source_len = intel_dp_source_rates(intel_dp, &source_rates);
+
+	return intersect_rates(source_rates, source_len,
+			       sink_rates, sink_len,
+			       common_rates);
+}
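
intersect_rates() is a linear two-pointer intersection over two ascending arrays, so common_rates comes out sorted as well and the whole merge costs O(source_len + sink_len). A standalone demo of the same walk with made-up rate tables:

	#include <stdio.h>

	static int intersect(const int *a, int alen,
			     const int *b, int blen, int *out)
	{
		int i = 0, j = 0, k = 0;

		while (i < alen && j < blen) {
			if (a[i] == b[j]) {
				out[k++] = a[i];
				i++;
				j++;
			} else if (a[i] < b[j]) {
				i++;
			} else {
				j++;
			}
		}
		return k;
	}

	int main(void)
	{
		const int source[] = { 162000, 270000, 540000 };
		const int sink[] = { 162000, 270000 };
		int common[8];
		int n = intersect(source, 3, sink, 2, common);

		for (int i = 0; i < n; i++)
			printf("common rate: %d kHz\n", common[i]);
		return 0;	/* prints 162000 and 270000 */
	}
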
+
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
@@ -320,8 +395,7 @@ static void
 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 	enum pipe pipe = intel_dp->pps_pipe;
 	bool pll_enabled, release_cl_override = false;
 	enum dpio_phy phy = DPIO_PHY(pipe);
@@ -344,7 +418,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 	DP |= DP_PORT_WIDTH(1);
 	DP |= DP_LINK_TRAIN_PAT_1;
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		DP |= DP_PIPE_SELECT_CHV(pipe);
 	else if (pipe == PIPE_B)
 		DP |= DP_PIPEB_SELECT;
@@ -356,10 +430,10 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 	 * So enable it temporarily if it's not already enabled.
 	 */
 	if (!pll_enabled) {
-		release_cl_override = IS_CHERRYVIEW(dev) &&
+		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
 
-		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
 				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
 			DRM_ERROR("Failed to force on pll for pipe %c!\n",
 				  pipe_name(pipe));
@@ -383,7 +457,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 	POSTING_READ(intel_dp->output_reg);
 
 	if (!pll_enabled) {
-		vlv_force_pll_off(dev, pipe);
+		vlv_force_pll_off(dev_priv, pipe);
 
 		if (release_cl_override)
 			chv_phy_powergate_ch(dev_priv, phy, ch, false);
@@ -570,8 +644,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_encoder *encoder;
 
-	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
-		    !IS_BROXTON(dev)))
+	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+		    !IS_BROXTON(dev_priv)))
 		return;
 
 	/*
@@ -591,7 +665,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 			continue;
 
 		intel_dp = enc_to_intel_dp(&encoder->base);
-		if (IS_BROXTON(dev))
+		if (IS_BROXTON(dev_priv))
 			intel_dp->pps_reset = true;
 		else
 			intel_dp->pps_pipe = INVALID_PIPE;
@@ -664,7 +738,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
 
 	pps_lock(intel_dp);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
 		i915_reg_t pp_ctrl_reg, pp_div_reg;
 		u32 pp_div;
@@ -692,7 +766,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	    intel_dp->pps_pipe == INVALID_PIPE)
 		return false;
 
@@ -706,7 +780,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	    intel_dp->pps_pipe == INVALID_PIPE)
 		return false;
 
@@ -821,15 +895,16 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
 				     uint32_t aux_clock_divider)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv =
+			to_i915(intel_dig_port->base.base.dev);
 	uint32_t precharge, timeout;
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev_priv))
 		precharge = 3;
 	else
 		precharge = 5;
 
-	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
+	if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
 	else
 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -1108,8 +1183,46 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	return ret;
 }
 
+static enum port intel_aux_port(struct drm_i915_private *dev_priv,
+				enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	enum port aux_port;
+
+	if (!info->alternate_aux_channel) {
+		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+			      port_name(port), port_name(port));
+		return port;
+	}
+
+	switch (info->alternate_aux_channel) {
+	case DP_AUX_A:
+		aux_port = PORT_A;
+		break;
+	case DP_AUX_B:
+		aux_port = PORT_B;
+		break;
+	case DP_AUX_C:
+		aux_port = PORT_C;
+		break;
+	case DP_AUX_D:
+		aux_port = PORT_D;
+		break;
+	default:
+		MISSING_CASE(info->alternate_aux_channel);
+		aux_port = PORT_A;
+		break;
+	}
+
+	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+		      port_name(aux_port), port_name(port));
+
+	return aux_port;
+}
+
 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
-				       enum port port)
+				  enum port port)
 {
 	switch (port) {
 	case PORT_B:
@@ -1123,7 +1236,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
 }
 
 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
-					enum port port, int index)
+				   enum port port, int index)
 {
 	switch (port) {
 	case PORT_B:
@@ -1137,7 +1250,7 @@ static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
 }
 
 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
-				       enum port port)
+				  enum port port)
 {
 	switch (port) {
 	case PORT_A:
@@ -1153,7 +1266,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
 }
 
 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
-					enum port port, int index)
+				   enum port port, int index)
 {
 	switch (port) {
 	case PORT_A:
@@ -1168,36 +1281,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
 	}
 }
 
-/*
- * On SKL we don't have Aux for port E so we rely
- * on VBT to set a proper alternate aux channel.
- */
-static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
-{
-	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[PORT_E];
-
-	switch (info->alternate_aux_channel) {
-	case DP_AUX_A:
-		return PORT_A;
-	case DP_AUX_B:
-		return PORT_B;
-	case DP_AUX_C:
-		return PORT_C;
-	case DP_AUX_D:
-		return PORT_D;
-	default:
-		MISSING_CASE(info->alternate_aux_channel);
-		return PORT_A;
-	}
-}
-
 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
-				       enum port port)
+				  enum port port)
 {
-	if (port == PORT_E)
-		port = skl_porte_aux_port(dev_priv);
-
 	switch (port) {
 	case PORT_A:
 	case PORT_B:
@@ -1211,11 +1297,8 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
 }
 
 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
-					enum port port, int index)
+				   enum port port, int index)
 {
-	if (port == PORT_E)
-		port = skl_porte_aux_port(dev_priv);
-
 	switch (port) {
 	case PORT_A:
 	case PORT_B:
@@ -1229,7 +1312,7 @@ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
 }
 
 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
-					 enum port port)
+				    enum port port)
 {
 	if (INTEL_INFO(dev_priv)->gen >= 9)
 		return skl_aux_ctl_reg(dev_priv, port);
@@ -1240,7 +1323,7 @@ static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
 }
 
 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
-					  enum port port, int index)
+				     enum port port, int index)
 {
 	if (INTEL_INFO(dev_priv)->gen >= 9)
 		return skl_aux_data_reg(dev_priv, port, index);
@@ -1253,7 +1336,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
 static void intel_aux_reg_init(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-	enum port port = dp_to_dig_port(intel_dp)->port;
+	enum port port = intel_aux_port(dev_priv,
+					dp_to_dig_port(intel_dp)->port);
 	int i;
 
 	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
@@ -1281,78 +1365,37 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
 	intel_dp->aux.transfer = intel_dp_aux_transfer;
 }
 
-static int
-intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
-{
-	if (intel_dp->num_sink_rates) {
-		*sink_rates = intel_dp->sink_rates;
-		return intel_dp->num_sink_rates;
-	}
-
-	*sink_rates = default_rates;
-
-	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
-}
-
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 
-	/* WaDisableHBR2:skl */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
-		return false;
-
-	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
-	    (INTEL_INFO(dev)->gen >= 9))
+	if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+	    IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
 		return true;
 	else
 		return false;
 }
 
-static int
-intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	int size;
-
-	if (IS_BROXTON(dev)) {
-		*source_rates = bxt_rates;
-		size = ARRAY_SIZE(bxt_rates);
-	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-		*source_rates = skl_rates;
-		size = ARRAY_SIZE(skl_rates);
-	} else {
-		*source_rates = default_rates;
-		size = ARRAY_SIZE(default_rates);
-	}
-
-	/* This depends on the fact that 5.4 is last value in the array */
-	if (!intel_dp_source_supports_hbr2(intel_dp))
-		size--;
-
-	return size;
-}
-
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
 		   struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct dp_link_dpll *divisor = NULL;
 	int i, count = 0;
 
-	if (IS_G4X(dev)) {
+	if (IS_G4X(dev_priv)) {
 		divisor = gen4_dpll;
 		count = ARRAY_SIZE(gen4_dpll);
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		divisor = pch_dpll;
 		count = ARRAY_SIZE(pch_dpll);
-	} else if (IS_CHERRYVIEW(dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		divisor = chv_dpll;
 		count = ARRAY_SIZE(chv_dpll);
-	} else if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		divisor = vlv_dpll;
 		count = ARRAY_SIZE(vlv_dpll);
 	}
@@ -1368,43 +1411,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
 	}
 }
 
-static int intersect_rates(const int *source_rates, int source_len,
-			   const int *sink_rates, int sink_len,
-			   int *common_rates)
-{
-	int i = 0, j = 0, k = 0;
-
-	while (i < source_len && j < sink_len) {
-		if (source_rates[i] == sink_rates[j]) {
-			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
-				return k;
-			common_rates[k] = source_rates[i];
-			++k;
-			++i;
-			++j;
-		} else if (source_rates[i] < sink_rates[j]) {
-			++i;
-		} else {
-			++j;
-		}
-	}
-	return k;
-}
-
-static int intel_dp_common_rates(struct intel_dp *intel_dp,
-				 int *common_rates)
-{
-	const int *source_rates, *sink_rates;
-	int source_len, sink_len;
-
-	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
-	source_len = intel_dp_source_rates(intel_dp, &source_rates);
-
-	return intersect_rates(source_rates, source_len,
-			       sink_rates, sink_len,
-			       common_rates);
-}
-
 static void snprintf_int_array(char *str, size_t len,
 			       const int *array, int nelem)
 {
@@ -1444,42 +1450,35 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
-static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+bool
+__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
 {
-	uint8_t rev;
-	int len;
+	u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
+						      DP_SINK_OUI;
 
-	if ((drm_debug & DRM_UT_KMS) == 0)
-		return;
-
-	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-	      DP_DWN_STRM_PORT_PRESENT))
-		return;
-
-	len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
-	if (len < 0)
-		return;
-
-	DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+	return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
+	       sizeof(*desc);
 }
 
-static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+bool intel_dp_read_desc(struct intel_dp *intel_dp)
 {
-	uint8_t rev[2];
-	int len;
+	struct intel_dp_desc *desc = &intel_dp->desc;
+	bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+		       DP_OUI_SUPPORT;
+	int dev_id_len;
 
-	if ((drm_debug & DRM_UT_KMS) == 0)
-		return;
+	if (!__intel_dp_read_desc(intel_dp, desc))
+		return false;
 
-	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-	      DP_DWN_STRM_PORT_PRESENT))
-		return;
+	dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
+	DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
+		      drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
+		      (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
+		      dev_id_len, desc->device_id,
+		      desc->hw_rev >> 4, desc->hw_rev & 0xf,
+		      desc->sw_major_rev, desc->sw_minor_rev);
 
-	len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
-	if (len < 0)
-		return;
-
-	DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+	return true;
 }
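
Note the strnlen() plus "%*pE" pairing above: device_id is a fixed-size DPCD field with no guaranteed NUL terminator, so its length must be computed and passed explicitly. The same defensive pattern in plain userspace C, with a hypothetical ID value:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char device_id[6] = { 'M', 'S', 'T', 'H', 'U', 'B' }; /* no NUL */
		int len = strnlen(device_id, sizeof(device_id));

		printf("dev-ID %.*s\n", len, device_id); /* length-bounded print */
		return 0;
	}
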
 
 static int rate_to_index(int find, const int *rates)
@@ -1569,7 +1568,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
 	max_clock = common_len - 1;
 
-	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
+	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
 		pipe_config->has_pch_encoder = true;
 
 	pipe_config->has_drrs = false;
@@ -1586,7 +1585,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 				return ret;
 		}
 
-		if (HAS_GMCH_DISPLAY(dev))
+		if (HAS_GMCH_DISPLAY(dev_priv))
 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
 						 intel_connector->panel.fitting_mode);
 		else
@@ -1711,7 +1710,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 		to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
 	}
 
-	if (!HAS_DDI(dev))
+	if (!HAS_DDI(dev_priv))
 		intel_dp_set_clock(encoder, pipe_config);
 
 	return true;
@@ -1769,7 +1768,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 
 	/* Split out the IBX/CPU vs CPT settings */
 
-	if (IS_GEN7(dev) && port == PORT_A) {
+	if (IS_GEN7(dev_priv) && port == PORT_A) {
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 			intel_dp->DP |= DP_SYNC_HS_HIGH;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1780,7 +1779,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
 		intel_dp->DP |= crtc->pipe << 29;
-	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
 		u32 trans_dp;
 
 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
@@ -1792,8 +1791,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
 	} else {
-		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
-		    !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
+		if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+		    !IS_CHERRYVIEW(dev_priv) &&
+		    pipe_config->limited_color_range)
 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1805,7 +1805,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
-		if (IS_CHERRYVIEW(dev))
+		if (IS_CHERRYVIEW(dev_priv))
 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
 		else if (crtc->pipe == PIPE_B)
 			intel_dp->DP |= DP_PIPEB_SELECT;
@@ -2114,7 +2114,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 	pp = ironlake_get_pp_control(intel_dp);
-	if (IS_GEN5(dev)) {
+	if (IS_GEN5(dev_priv)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
 		I915_WRITE(pp_ctrl_reg, pp);
@@ -2122,7 +2122,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 	}
 
 	pp |= PANEL_POWER_ON;
-	if (!IS_GEN5(dev))
+	if (!IS_GEN5(dev_priv))
 		pp |= PANEL_POWER_RESET;
 
 	I915_WRITE(pp_ctrl_reg, pp);
@@ -2131,7 +2131,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 	wait_panel_on(intel_dp);
 	intel_dp->last_power_on = jiffies;
 
-	if (IS_GEN5(dev)) {
+	if (IS_GEN5(dev_priv)) {
 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
 		I915_WRITE(pp_ctrl_reg, pp);
 		POSTING_READ(pp_ctrl_reg);
@@ -2363,7 +2363,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
 	 * 2. Program DP PLL enable
 	 */
 	if (IS_GEN5(dev_priv))
-		intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
+		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
 
 	intel_dp->DP |= DP_PLL_ENABLE;
 
@@ -2444,9 +2444,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 	if (!(tmp & DP_PORT_EN))
 		goto out;
 
-	if (IS_GEN7(dev) && port == PORT_A) {
+	if (IS_GEN7(dev_priv) && port == PORT_A) {
 		*pipe = PORT_TO_PIPE_CPT(tmp);
-	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
 		enum pipe p;
 
 		for_each_pipe(dev_priv, p) {
@@ -2461,7 +2461,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 
 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
 			      i915_mmio_reg_offset(intel_dp->output_reg));
-	} else if (IS_CHERRYVIEW(dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
 	} else {
 		*pipe = PORT_TO_PIPE(tmp);
@@ -2489,7 +2489,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
 
-	if (HAS_PCH_CPT(dev) && port != PORT_A) {
+	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
 		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
 
 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
@@ -2515,8 +2515,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
 	pipe_config->base.adjusted_mode.flags |= flags;
 
-	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
-	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
+	if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+	    !IS_CHERRYVIEW(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
 		pipe_config->limited_color_range = true;
 
 	pipe_config->lane_count =
@@ -2636,7 +2636,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
 			      dp_train_pat & DP_TRAINING_PATTERN_MASK);
 
-	if (HAS_DDI(dev)) {
+	if (HAS_DDI(dev_priv)) {
 		uint32_t temp = I915_READ(DP_TP_CTL(port));
 
 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -2662,8 +2662,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 		}
 		I915_WRITE(DP_TP_CTL(port), temp);
 
-	} else if ((IS_GEN7(dev) && port == PORT_A) ||
-		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
+	} else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -2683,7 +2683,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 		}
 
 	} else {
-		if (IS_CHERRYVIEW(dev))
+		if (IS_CHERRYVIEW(dev_priv))
 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
 		else
 			*DP &= ~DP_LINK_TRAIN_MASK;
@@ -2699,7 +2699,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		case DP_TRAINING_PATTERN_3:
-			if (IS_CHERRYVIEW(dev)) {
+			if (IS_CHERRYVIEW(dev_priv)) {
 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
 			} else {
 				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
@@ -2735,7 +2735,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
 }
 
 static void intel_enable_dp(struct intel_encoder *encoder,
-			    struct intel_crtc_state *pipe_config)
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -2749,7 +2750,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
 
 	pps_lock(intel_dp);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_init_panel_power_sequencer(intel_dp);
 
 	intel_dp_enable_port(intel_dp, pipe_config);
@@ -2760,10 +2761,10 @@ static void intel_enable_dp(struct intel_encoder *encoder,
 
 	pps_unlock(intel_dp);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		unsigned int lane_mask = 0x0;
 
-		if (IS_CHERRYVIEW(dev))
+		if (IS_CHERRYVIEW(dev_priv))
 			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
 
 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
@@ -2777,7 +2778,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
 	if (pipe_config->has_audio) {
 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
 				 pipe_name(pipe));
-		intel_audio_codec_enable(encoder);
+		intel_audio_codec_enable(encoder, pipe_config, conn_state);
 	}
 }
 
@@ -2787,7 +2788,7 @@ static void g4x_enable_dp(struct intel_encoder *encoder,
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-	intel_enable_dp(encoder, pipe_config);
+	intel_enable_dp(encoder, pipe_config, conn_state);
 	intel_edp_backlight_on(intel_dp);
 }
 
@@ -2924,7 +2925,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
 {
 	vlv_phy_pre_encoder_enable(encoder);
 
-	intel_enable_dp(encoder, pipe_config);
+	intel_enable_dp(encoder, pipe_config, conn_state);
 }
 
 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
@@ -2942,7 +2943,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
 {
 	chv_phy_pre_encoder_enable(encoder);
 
-	intel_enable_dp(encoder, pipe_config);
+	intel_enable_dp(encoder, pipe_config, conn_state);
 
 	/* Second common lane will stay alive on its own now */
 	chv_phy_release_cl2_override(encoder);
@@ -2983,17 +2984,17 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(dev_priv))
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 	else if (INTEL_INFO(dev)->gen >= 9) {
 		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-	else if (IS_GEN7(dev) && port == PORT_A)
+	else if (IS_GEN7(dev_priv) && port == PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
-	else if (HAS_PCH_CPT(dev) && port != PORT_A)
+	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 	else
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -3002,10 +3003,10 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3018,7 +3019,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 		default:
 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
 		}
-	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3030,7 +3031,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 		default:
 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
 		}
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3042,7 +3043,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 		default:
 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
 		}
-	} else if (IS_GEN7(dev) && port == PORT_A) {
+	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3343,21 +3344,21 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
 	uint32_t signal_levels, mask = 0;
 	uint8_t train_set = intel_dp->train_set[0];
 
-	if (HAS_DDI(dev)) {
+	if (HAS_DDI(dev_priv)) {
 		signal_levels = ddi_signal_levels(intel_dp);
 
-		if (IS_BROXTON(dev))
+		if (IS_BROXTON(dev_priv))
 			signal_levels = 0;
 		else
 			mask = DDI_BUF_EMP_MASK;
-	} else if (IS_CHERRYVIEW(dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		signal_levels = chv_signal_levels(intel_dp);
-	} else if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		signal_levels = vlv_signal_levels(intel_dp);
-	} else if (IS_GEN7(dev) && port == PORT_A) {
+	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
 		signal_levels = gen7_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
-	} else if (IS_GEN6(dev) && port == PORT_A) {
+	} else if (IS_GEN6(dev_priv) && port == PORT_A) {
 		signal_levels = gen6_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
 	} else {
@@ -3402,7 +3403,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 	enum port port = intel_dig_port->port;
 	uint32_t val;
 
-	if (!HAS_DDI(dev))
+	if (!HAS_DDI(dev_priv))
 		return;
 
 	val = I915_READ(DP_TP_CTL(port));
@@ -3437,7 +3438,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t DP = intel_dp->DP;
 
-	if (WARN_ON(HAS_DDI(dev)))
+	if (WARN_ON(HAS_DDI(dev_priv)))
 		return;
 
 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -3445,12 +3446,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("\n");
 
-	if ((IS_GEN7(dev) && port == PORT_A) ||
-	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
+	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
 	} else {
-		if (IS_CHERRYVIEW(dev))
+		if (IS_CHERRYVIEW(dev_priv))
 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
 		else
 			DP &= ~DP_LINK_TRAIN_MASK;
@@ -3468,7 +3469,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	 * to transcoder A after disabling it to allow the
 	 * matching HDMI port to be enabled on transcoder A.
 	 */
-	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
+	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
 		/*
 		 * We get CPU/PCH FIFO underruns on the other pipe when
 		 * doing the workaround. Sweep them under the rug.
@@ -3486,7 +3487,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 		I915_WRITE(intel_dp->output_reg, DP);
 		POSTING_READ(intel_dp->output_reg);
 
-		intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
@@ -3496,7 +3497,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	intel_dp->DP = DP;
 }
 
-static bool
+bool
 intel_dp_read_dpcd(struct intel_dp *intel_dp)
 {
 	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
@@ -3520,6 +3521,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 	if (!intel_dp_read_dpcd(intel_dp))
 		return false;
 
+	intel_dp_read_desc(intel_dp);
+
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
 		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
 			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
@@ -3551,8 +3554,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 	/* Read the eDP Display control capabilities registers */
 	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
 	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
-			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) ==
-			     sizeof(intel_dp->edp_dpcd)))
+			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+			     sizeof(intel_dp->edp_dpcd))
 		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
 			      intel_dp->edp_dpcd);
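The edp_dpcd hunk above is a real bugfix, not just churn: with the parenthesis misplaced, the comparison sizeof(intel_dp->edp_dpcd) == sizeof(intel_dp->edp_dpcd) was evaluated inside the argument list, so drm_dp_dpcd_read() was asked for a single byte (the comparison's value, 1) and its return value was never checked. A minimal userspace sketch of the same precedence bug, with a stubbed-out read helper (illustrative only, not driver code):

	#include <stdio.h>
	#include <stddef.h>

	/* stand-in for drm_dp_dpcd_read(); returns the requested length */
	static long fake_dpcd_read(void *buf, size_t len)
	{
		(void)buf;
		return (long)len;
	}

	int main(void)
	{
		char edp_dpcd[4];

		/* buggy form: sizeof(x) == sizeof(x) is 1, passed as the length */
		long buggy_len = fake_dpcd_read(edp_dpcd,
						sizeof(edp_dpcd) == sizeof(edp_dpcd));

		/* fixed form: read the whole buffer, then check the return value */
		int ok = fake_dpcd_read(edp_dpcd, sizeof(edp_dpcd)) ==
			 (long)sizeof(edp_dpcd);

		printf("buggy length=%ld, fixed ok=%d\n", buggy_len, ok);
		return 0;
	}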
 
@@ -3607,8 +3610,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	if (!is_edp(intel_dp) && !intel_dp->sink_count)
 		return false;
 
-	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-	      DP_DWN_STRM_PORT_PRESENT))
+	if (!drm_dp_is_branch(intel_dp->dpcd))
 		return true; /* native DP sink */
 
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
@@ -3622,23 +3624,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	return true;
 }
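This hunk, and the matching one in intel_dp_detect_dpcd() further down, replace the open-coded DP_DOWNSTREAMPORT_PRESENT test with the drm_dp_is_branch() helper. Judging by the lines being removed, the helper reduces to the same single-bit check; a self-contained sketch of the equivalent logic (register offsets per the DP spec, names illustrative rather than quoted from the header):

	#include <stdbool.h>
	#include <stdint.h>

	#define DP_RECEIVER_CAP_SIZE		0xf
	#define DP_DOWNSTREAMPORT_PRESENT	0x005
	#define DP_DWN_STRM_PORT_PRESENT	(1 << 0)

	/* true if the DPCD describes a branch device with downstream ports */
	static bool dp_is_branch(const uint8_t dpcd[DP_RECEIVER_CAP_SIZE])
	{
		return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
	}

	int main(void)
	{
		uint8_t dpcd[DP_RECEIVER_CAP_SIZE] = { 0 };

		dpcd[DP_DOWNSTREAMPORT_PRESENT] = DP_DWN_STRM_PORT_PRESENT;
		return dp_is_branch(dpcd) ? 0 : 1;
	}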
 
-static void
-intel_dp_probe_oui(struct intel_dp *intel_dp)
-{
-	u8 buf[3];
-
-	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
-		return;
-
-	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
-		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
-			      buf[0], buf[1], buf[2]);
-
-	if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
-		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
-			      buf[0], buf[1], buf[2]);
-}
-
 static bool
 intel_dp_can_mst(struct intel_dp *intel_dp)
 {
@@ -3682,7 +3667,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
 	u8 buf;
 	int ret = 0;
@@ -3703,7 +3688,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 	}
 
 	do {
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
+		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
 
 		if (drm_dp_dpcd_readb(&intel_dp->aux,
 				      DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3726,7 +3711,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
 	u8 buf;
 	int ret;
@@ -3754,14 +3739,14 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
 		return -EIO;
 	}
 
-	intel_wait_for_vblank(dev, intel_crtc->pipe);
+	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
 	return 0;
 }
 
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
 	u8 buf;
 	int count, ret;
@@ -3772,7 +3757,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 		return ret;
 
 	do {
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
+		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
 
 		if (drm_dp_dpcd_readb(&intel_dp->aux,
 				      DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3989,6 +3974,31 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
 }
 
 static void
+intel_dp_retrain_link(struct intel_dp *intel_dp)
+{
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	/* Suppress underruns caused by re-training */
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+	if (crtc->config->has_pch_encoder)
+		intel_set_pch_fifo_underrun_reporting(dev_priv,
+						      intel_crtc_pch_transcoder(crtc), false);
+
+	intel_dp_start_link_train(intel_dp);
+	intel_dp_stop_link_train(intel_dp);
+
+	/* Keep underrun reporting disabled until things are stable */
+	intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+	if (crtc->config->has_pch_encoder)
+		intel_set_pch_fifo_underrun_reporting(dev_priv,
+						      intel_crtc_pch_transcoder(crtc), true);
+}
+
+static void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
@@ -4008,13 +4018,18 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
 		return;
 
+	/* FIXME: we need to synchronize this sort of stuff with hardware
+	 * readout */
+	if (WARN_ON_ONCE(!intel_dp->lane_count))
+		return;
+
 	/* if link training is requested we should perform it always */
 	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
 	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
 			      intel_encoder->base.name);
-		intel_dp_start_link_train(intel_dp);
-		intel_dp_stop_link_train(intel_dp);
+
+		intel_dp_retrain_link(intel_dp);
 	}
 }
 
@@ -4096,7 +4111,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 		return connector_status_connected;
 
 	/* if there's no downstream port, we're done */
-	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+	if (!drm_dp_is_branch(dpcd))
 		return connector_status_connected;
 
 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
@@ -4387,10 +4402,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
 	intel_dp_print_rates(intel_dp);
 
-	intel_dp_probe_oui(intel_dp);
-
-	intel_dp_print_hw_revision(intel_dp);
-	intel_dp_print_sw_revision(intel_dp);
+	intel_dp_read_desc(intel_dp);
 
 	intel_dp_configure_mst(intel_dp);
 
@@ -4454,21 +4466,11 @@ static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	enum drm_connector_status status = connector->status;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 
-	if (intel_dp->is_mst) {
-		/* MST devices are disconnected from a monitor POV */
-		intel_dp_unset_edid(intel_dp);
-		if (intel_encoder->type != INTEL_OUTPUT_EDP)
-			intel_encoder->type = INTEL_OUTPUT_DP;
-		return connector_status_disconnected;
-	}
-
 	/* If full detect is not performed yet, do a full detect */
 	if (!intel_dp->detect_done)
 		status = intel_dp_long_pulse(intel_dp->attached_connector);
@@ -4756,11 +4758,16 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
 	if (!HAS_DDI(dev_priv))
 		intel_dp->DP = I915_READ(intel_dp->output_reg);
 
+	if (IS_GEN9(dev_priv) && lspcon->active)
+		lspcon_resume(lspcon);
+
 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
 		return;
 
@@ -5074,7 +5081,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
 	/* Compute the divisor for the pp clock, simply match the Bspec
 	 * formula. */
-	if (IS_BROXTON(dev)) {
+	if (IS_BROXTON(dev_priv)) {
 		pp_div = I915_READ(regs.pp_ctrl);
 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5087,9 +5094,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		port_sel = PANEL_PORT_SELECT_VLV(port);
-	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
 		if (port == PORT_A)
 			port_sel = PANEL_PORT_SELECT_DPA;
 		else
@@ -5100,7 +5107,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
 	I915_WRITE(regs.pp_on, pp_on);
 	I915_WRITE(regs.pp_off, pp_off);
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(dev_priv))
 		I915_WRITE(regs.pp_ctrl, pp_div);
 	else
 		I915_WRITE(regs.pp_div, pp_div);
@@ -5108,7 +5115,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
 		      I915_READ(regs.pp_on),
 		      I915_READ(regs.pp_off),
-		      IS_BROXTON(dev) ?
+		      IS_BROXTON(dev_priv) ?
 		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
 		      I915_READ(regs.pp_div));
 }
@@ -5116,7 +5123,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 static void intel_dp_pps_init(struct drm_device *dev,
 			      struct intel_dp *intel_dp)
 {
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		vlv_initial_power_sequencer_setup(intel_dp);
 	} else {
 		intel_dp_init_panel_power_sequencer(dev, intel_dp);
@@ -5586,7 +5595,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
 		register_reboot_notifier(&intel_dp->edp_notifier);
 
@@ -5595,7 +5604,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 		 * If the current pipe isn't valid, try the PPS pipe, and if that
 		 * fails just assume pipe A.
 		 */
-		if (IS_CHERRYVIEW(dev))
+		if (IS_CHERRYVIEW(dev_priv))
 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
 		else
 			pipe = PORT_TO_PIPE(intel_dp->DP);
@@ -5651,9 +5660,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	/* intel_dp vfuncs */
 	if (INTEL_INFO(dev)->gen >= 9)
 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
-	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
-	else if (HAS_PCH_SPLIT(dev))
+	else if (HAS_PCH_SPLIT(dev_priv))
 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
 	else
 		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
@@ -5663,7 +5672,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	else
 		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
 
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
 
 	/* Preserve the current hw state. */
@@ -5684,7 +5693,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		intel_encoder->type = INTEL_OUTPUT_EDP;
 
 	/* eDP only on port B and/or C on vlv/chv */
-	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
 		return false;
 
@@ -5705,7 +5714,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
 	else
 		intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -5717,7 +5726,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		break;
 	case PORT_B:
 		intel_encoder->hpd_pin = HPD_PORT_B;
-		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+		if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 			intel_encoder->hpd_pin = HPD_PORT_A;
 		break;
 	case PORT_C:
@@ -5751,7 +5760,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	 * 0xd.  Failure to do so will result in spurious interrupts being
 	 * generated on the port when a cable is not attached.
 	 */
-	if (IS_G4X(dev) && !IS_GM45(dev)) {
+	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
 	}
@@ -5794,13 +5803,13 @@ bool intel_dp_init(struct drm_device *dev,
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
 	intel_encoder->get_config = intel_dp_get_config;
 	intel_encoder->suspend = intel_dp_encoder_suspend;
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
 		intel_encoder->pre_enable = chv_pre_enable_dp;
 		intel_encoder->enable = vlv_enable_dp;
 		intel_encoder->post_disable = chv_post_disable_dp;
 		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
-	} else if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
 		intel_encoder->pre_enable = vlv_pre_enable_dp;
 		intel_encoder->enable = vlv_enable_dp;
@@ -5817,7 +5826,7 @@ bool intel_dp_init(struct drm_device *dev,
 	intel_dig_port->max_lanes = 4;
 
 	intel_encoder->type = INTEL_OUTPUT_DP;
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		if (port == PORT_D)
 			intel_encoder->crtc_mask = 1 << 2;
 		else
@@ -5826,6 +5835,7 @@ bool intel_dp_init(struct drm_device *dev,
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	}
 	intel_encoder->cloneable = 0;
+	intel_encoder->port = port;
 
 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
 	dev_priv->hotplug.irq_port[port] = intel_dig_port;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index c438b02..0048b52 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -225,9 +225,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
 	 * also mandatory for downstream devices that support HBR2. However, not
 	 * all sinks follow the spec.
-	 *
-	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
-	 * supported in source but still not enabled.
 	 */
 	source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
 	sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 54a9d76..3ffbd69 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -523,6 +523,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 			 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
 
 	intel_encoder->type = INTEL_OUTPUT_DP_MST;
+	intel_encoder->port = intel_dig_port->port;
 	intel_encoder->crtc_mask = 0x7;
 	intel_encoder->cloneable = 0;
 
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 047f487..7a8e82d 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -23,6 +23,565 @@
 
 #include "intel_drv.h"
 
+/**
+ * DOC: DPIO
+ *
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to a specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Coding Sublayer (PCS)
+ * block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY also contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ *  pipe A == CMN/PLL/REF CH0
+ *
+ *  pipe B == CMN/PLL/REF CH1
+ *
+ *  port B == PCS/TX CH0
+ *
+ *  port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * i.e. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ *  pipe C == CMN/PLL/REF CH0
+ *
+ *  port D == PCS/TX CH0
+ *
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT). ::
+ *
+ *
+ *     Dual channel PHY (VLV/CHV/BXT)
+ *     ---------------------------------
+ *     |      CH0      |      CH1      |
+ *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
+ *     |---------------|---------------| Display PHY
+ *     | PCS01 | PCS23 | PCS01 | PCS23 |
+ *     |-------|-------|-------|-------|
+ *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ *     ---------------------------------
+ *     |     DDI0      |     DDI1      | DP/HDMI ports
+ *     ---------------------------------
+ *
+ *     Single channel PHY (CHV/BXT)
+ *     -----------------
+ *     |      CH0      |
+ *     |  CMN/PLL/REF  |
+ *     |---------------| Display PHY
+ *     | PCS01 | PCS23 |
+ *     |-------|-------|
+ *     |TX0|TX1|TX2|TX3|
+ *     -----------------
+ *     |     DDI2      | DP/HDMI port
+ *     -----------------
+ */
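To make the dual channel mapping described above concrete: on VLV/CHV the PLL follows the pipe while the TX lanes follow the port, so driving port B from pipe B really does mix channels. A standalone sketch of that lookup (enum values illustrative, not the driver's definitions):

	#include <stdio.h>

	enum pipe { PIPE_A, PIPE_B };
	enum port { PORT_B, PORT_C };
	enum dpio_channel { DPIO_CH0, DPIO_CH1 };

	/* the common lane (CMN/PLL/REF) is owned by the pipe... */
	static enum dpio_channel pipe_to_channel(enum pipe pipe)
	{
		return pipe == PIPE_A ? DPIO_CH0 : DPIO_CH1;
	}

	/* ...while the PCS/TX splines are owned by the port */
	static enum dpio_channel port_to_channel(enum port port)
	{
		return port == PORT_B ? DPIO_CH0 : DPIO_CH1;
	}

	int main(void)
	{
		/* "crossing the streams": pipe B drives port B, so the PLL
		 * comes from CH1 while the TX lanes sit in CH0 */
		printf("PLL channel %d, TX channel %d\n",
		       pipe_to_channel(PIPE_B), port_to_channel(PORT_B));
		return 0;
	}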
+
+/**
+ * struct bxt_ddi_phy_info - Holds info for a Broxton DDI phy
+ */
+struct bxt_ddi_phy_info {
+	/**
+	 * @dual_channel: true if this phy has a second channel.
+	 */
+	bool dual_channel;
+
+	/**
+	 * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
+	 * Otherwise the GRC value will be copied from the phy indicated by
+	 * this field.
+	 */
+	enum dpio_phy rcomp_phy;
+
+	/**
+	 * @channel: struct containing per channel information.
+	 */
+	struct {
+		/**
+		 * @port: which port maps to this channel.
+		 */
+		enum port port;
+	} channel[2];
+};
+
+static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+	[DPIO_PHY0] = {
+		.dual_channel = true,
+		.rcomp_phy = DPIO_PHY1,
+
+		.channel = {
+			[DPIO_CH0] = { .port = PORT_B },
+			[DPIO_CH1] = { .port = PORT_C },
+		}
+	},
+	[DPIO_PHY1] = {
+		.dual_channel = false,
+		.rcomp_phy = -1,
+
+		.channel = {
+			[DPIO_CH0] = { .port = PORT_A },
+		}
+	},
+};
+
+static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
+{
+	return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
+		BIT(phy_info->channel[DPIO_CH0].port);
+}
+
+void bxt_port_to_phy_channel(enum port port,
+			     enum dpio_phy *phy, enum dpio_channel *ch)
+{
+	const struct bxt_ddi_phy_info *phy_info;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
+		phy_info = &bxt_ddi_phy_info[i];
+
+		if (port == phy_info->channel[DPIO_CH0].port) {
+			*phy = i;
+			*ch = DPIO_CH0;
+			return;
+		}
+
+		if (phy_info->dual_channel &&
+		    port == phy_info->channel[DPIO_CH1].port) {
+			*phy = i;
+			*ch = DPIO_CH1;
+			return;
+		}
+	}
+
+	WARN(1, "PHY not found for PORT %c", port_name(port));
+	*phy = DPIO_PHY0;
+	*ch = DPIO_CH0;
+}
+
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+				  enum port port, u32 margin, u32 scale,
+				  u32 enable, u32 deemphasis)
+{
+	u32 val;
+	enum dpio_phy phy;
+	enum dpio_channel ch;
+
+	bxt_port_to_phy_channel(port, &phy, &ch);
+
+	/*
+	 * While we write to the group register to program all lanes at once, we
+	 * can read back only individual lane registers; we pick lanes 0/1 for that.
+	 */
+	val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+	val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+
+	val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+	val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+	val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
+	I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+
+	val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+	val &= ~SCALE_DCOMP_METHOD;
+	if (enable)
+		val |= SCALE_DCOMP_METHOD;
+
+	if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+		DRM_ERROR("Disabled scaling while UNIQUE_TRANGE_EN_METHOD was set");
+
+	I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+
+	val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+	val &= ~DE_EMPHASIS;
+	val |= deemphasis << DEEMPH_SHIFT;
+	I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+
+	val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+	val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+}
+
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+			    enum dpio_phy phy)
+{
+	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+	enum port port;
+
+	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+		return false;
+
+	if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+		DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+				 phy);
+
+		return false;
+	}
+
+	if (phy_info->rcomp_phy == -1 &&
+	    !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
+		DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
+				 phy);
+
+		return false;
+	}
+
+	if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+		DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+				 phy);
+
+		return false;
+	}
+
+	for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
+		u32 tmp = I915_READ(BXT_PHY_CTL(port));
+
+		if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
+			DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
+					 "for port %c powered down "
+					 "(PHY_CTL %08x)\n",
+					 phy, port_name(port), tmp);
+
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+	u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+
+	return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+				  enum dpio_phy phy)
+{
+	if (intel_wait_for_register(dev_priv,
+				    BXT_PORT_REF_DW3(phy),
+				    GRC_DONE, GRC_DONE,
+				    10))
+		DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
+			      enum dpio_phy phy)
+{
+	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+	u32 val;
+
+	if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+		/* Still read out the GRC value for state verification */
+		if (phy_info->rcomp_phy != -1)
+			dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+
+		if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+					 "won't reprogram it\n", phy);
+
+			return;
+		}
+
+		DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+				 "force reprogramming it\n", phy);
+	}
+
+	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+	val |= GT_DISPLAY_POWER_ON(phy);
+	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+
+	/*
+	 * The PHY registers start out inaccessible and respond to reads with
+	 * all 1s.  Eventually they become accessible as they power up, then
+	 * the reserved bit will give the default 0.  Poll on the reserved bit
+	 * becoming 0 to find when the PHY is accessible.
+	 * HW team confirmed that the time to reach phypowergood status is
+	 * anywhere between 50 us and 100 us.
+	 */
+	if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+		(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
+		DRM_ERROR("timeout during PHY%d power on\n", phy);
+	}
+
+	/* Program PLL Rcomp code offset */
+	val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+	val &= ~IREF0RC_OFFSET_MASK;
+	val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+	I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+
+	val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+	val &= ~IREF1RC_OFFSET_MASK;
+	val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+	I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+
+	/* Program power gating */
+	val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+	val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+		SUS_CLK_CONFIG;
+	I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+
+	if (phy_info->dual_channel) {
+		val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+		val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+		I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+	}
+
+	if (phy_info->rcomp_phy != -1) {
+		uint32_t grc_code;
+		/*
+		 * PHY0 isn't connected to an RCOMP resistor so copy over
+		 * the corresponding calibrated value from PHY1, and disable
+		 * the automatic calibration on PHY0.
+		 */
+		val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
+							  phy_info->rcomp_phy);
+		grc_code = val << GRC_CODE_FAST_SHIFT |
+			   val << GRC_CODE_SLOW_SHIFT |
+			   val;
+		I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+
+		val = I915_READ(BXT_PORT_REF_DW8(phy));
+		val |= GRC_DIS | GRC_RDY_OVRD;
+		I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+	}
+
+	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+	val |= COMMON_RESET_DIS;
+	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+	if (phy_info->rcomp_phy == -1)
+		bxt_phy_wait_grc_done(dev_priv, phy);
+}
+
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+	uint32_t val;
+
+	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+	val &= ~COMMON_RESET_DIS;
+	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+	val &= ~GT_DISPLAY_POWER_ON(phy);
+	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+}
+
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+	enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
+	bool was_enabled;
+
+	lockdep_assert_held(&dev_priv->power_domains.lock);
+
+	if (rcomp_phy != -1) {
+		was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+
+		/*
+		 * We need to copy the GRC calibration value from rcomp_phy,
+		 * so make sure it's powered up.
+		 */
+		if (!was_enabled)
+			_bxt_ddi_phy_init(dev_priv, rcomp_phy);
+	}
+
+	_bxt_ddi_phy_init(dev_priv, phy);
+
+	if (rcomp_phy != -1 && !was_enabled)
+		bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
+}
+
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+		       i915_reg_t reg, u32 mask, u32 expected,
+		       const char *reg_fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	u32 val;
+
+	val = I915_READ(reg);
+	if ((val & mask) == expected)
+		return true;
+
+	va_start(args, reg_fmt);
+	vaf.fmt = reg_fmt;
+	vaf.va = &args;
+
+	DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+			 "current %08x, expected %08x (mask %08x)\n",
+			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+			 mask);
+
+	va_end(args);
+
+	return false;
+}
+
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+			      enum dpio_phy phy)
+{
+	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+	uint32_t mask;
+	bool ok;
+
+#define _CHK(reg, mask, exp, fmt, ...)					\
+	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,	\
+			       ## __VA_ARGS__)
+
+	if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+		return false;
+
+	ok = true;
+
+	/* PLL Rcomp code offset */
+	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+		    IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+		    "BXT_PORT_CL1CM_DW9(%d)", phy);
+	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+		    IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+		    "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+	/* Power gating */
+	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+		    "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+	if (phy_info->dual_channel)
+		ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
+			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+			   "BXT_PORT_CL2CM_DW6(%d)", phy);
+
+	if (phy_info->rcomp_phy != -1) {
+		u32 grc_code = dev_priv->bxt_phy_grc;
+
+		grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+			   grc_code << GRC_CODE_SLOW_SHIFT |
+			   grc_code;
+		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+		       GRC_CODE_NOM_MASK;
+		ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
+			   "BXT_PORT_REF_DW6(%d)", phy);
+
+		mask = GRC_DIS | GRC_RDY_OVRD;
+		ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
+			    "BXT_PORT_REF_DW8(%d)", phy);
+	}
+
+	return ok;
+#undef _CHK
+}
+
+uint8_t
+bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+				     uint8_t lane_count)
+{
+	switch (lane_count) {
+	case 1:
+		return 0;
+	case 2:
+		return BIT(2) | BIT(0);
+	case 4:
+		return BIT(3) | BIT(2) | BIT(0);
+	default:
+		MISSING_CASE(lane_count);
+
+		return 0;
+	}
+}
+
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+				     uint8_t lane_lat_optim_mask)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+	enum port port = dport->port;
+	enum dpio_phy phy;
+	enum dpio_channel ch;
+	int lane;
+
+	bxt_port_to_phy_channel(port, &phy, &ch);
+
+	for (lane = 0; lane < 4; lane++) {
+		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+		/*
+		 * Note that on CHV this flag is called UPAR, but has
+		 * the same function.
+		 */
+		val &= ~LATENCY_OPTIM;
+		if (lane_lat_optim_mask & BIT(lane))
+			val |= LATENCY_OPTIM;
+
+		I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+	}
+}
+
+uint8_t
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+	enum port port = dport->port;
+	enum dpio_phy phy;
+	enum dpio_channel ch;
+	int lane;
+	uint8_t mask;
+
+	bxt_port_to_phy_channel(port, &phy, &ch);
+
+	mask = 0;
+	for (lane = 0; lane < 4; lane++) {
+		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+		if (val & LATENCY_OPTIM)
+			mask |= BIT(lane);
+	}
+
+	return mask;
+}
+
 void chv_set_phy_signal_level(struct intel_encoder *encoder,
 			      u32 deemph_reg_value, u32 margin_reg_value,
 			      bool uniq_trans_scale)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 1c59ca5..21853a1 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1371,6 +1371,10 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 {
 	uint32_t temp;
 	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	enum dpio_phy phy;
+	enum dpio_channel ch;
+
+	bxt_port_to_phy_channel(port, &phy, &ch);
 
 	/* Non-SSC reference */
 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1378,72 +1382,72 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
 
 	/* Disable 10 bit clock */
-	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
 
 	/* Write P1 & P2 */
-	temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
 	temp |= pll->config.hw_state.ebb0;
-	I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
 
 	/* Write M2 integer */
-	temp = I915_READ(BXT_PORT_PLL(port, 0));
+	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
 	temp &= ~PORT_PLL_M2_MASK;
 	temp |= pll->config.hw_state.pll0;
-	I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
 
 	/* Write N */
-	temp = I915_READ(BXT_PORT_PLL(port, 1));
+	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
 	temp &= ~PORT_PLL_N_MASK;
 	temp |= pll->config.hw_state.pll1;
-	I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
 
 	/* Write M2 fraction */
-	temp = I915_READ(BXT_PORT_PLL(port, 2));
+	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
 	temp &= ~PORT_PLL_M2_FRAC_MASK;
 	temp |= pll->config.hw_state.pll2;
-	I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
 
 	/* Write M2 fraction enable */
-	temp = I915_READ(BXT_PORT_PLL(port, 3));
+	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
 	temp |= pll->config.hw_state.pll3;
-	I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
 
 	/* Write coeff */
-	temp = I915_READ(BXT_PORT_PLL(port, 6));
+	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
 	temp &= ~PORT_PLL_INT_COEFF_MASK;
 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
 	temp |= pll->config.hw_state.pll6;
-	I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
 
 	/* Write calibration val */
-	temp = I915_READ(BXT_PORT_PLL(port, 8));
+	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
 	temp |= pll->config.hw_state.pll8;
-	I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
 
-	temp = I915_READ(BXT_PORT_PLL(port, 9));
+	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
 	temp |= pll->config.hw_state.pll9;
-	I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
 
-	temp = I915_READ(BXT_PORT_PLL(port, 10));
+	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
 	temp &= ~PORT_PLL_DCO_AMP_MASK;
 	temp |= pll->config.hw_state.pll10;
-	I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
 
 	/* Recalibrate with new settings */
-	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
 	temp |= PORT_PLL_RECALIBRATE;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
 	temp |= pll->config.hw_state.ebb4;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
 
 	/* Enable PLL */
 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1459,11 +1463,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	 * While we write to the group register to program all lanes at once we
 	 * can read only lane registers and we pick lanes 0/1 for that.
 	 */
-	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
 	temp &= ~LANE_STAGGER_MASK;
 	temp &= ~LANESTAGGER_STRAP_OVRD;
 	temp |= pll->config.hw_state.pcsdw12;
-	I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
 }
 
 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1485,6 +1489,10 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
 	uint32_t val;
 	bool ret;
+	enum dpio_phy phy;
+	enum dpio_channel ch;
+
+	bxt_port_to_phy_channel(port, &phy, &ch);
 
 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;
@@ -1495,36 +1503,36 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!(val & PORT_PLL_ENABLE))
 		goto out;
 
-	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
 
-	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
 
-	hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
 	hw_state->pll0 &= PORT_PLL_M2_MASK;
 
-	hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
 	hw_state->pll1 &= PORT_PLL_N_MASK;
 
-	hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
 
-	hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
 
-	hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
 			  PORT_PLL_INT_COEFF_MASK |
 			  PORT_PLL_GAIN_CTL_MASK;
 
-	hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
 
-	hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
 
-	hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
 			   PORT_PLL_DCO_AMP_MASK;
 
@@ -1533,11 +1541,11 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	 * can read only lane registers. We configure all lanes the same way, so
 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
 	 */
-	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
-	if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
 				 hw_state->pcsdw12,
-				 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
 	ret = true;
@@ -1851,13 +1859,13 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	const struct dpll_info *dpll_info;
 	int i;
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		dpll_mgr = &skl_pll_mgr;
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		dpll_mgr = &bxt_pll_mgr;
-	else if (HAS_DDI(dev))
+	else if (HAS_DDI(dev_priv))
 		dpll_mgr = &hsw_pll_mgr;
-	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
 		dpll_mgr = &pch_pll_mgr;
 
 	if (!dpll_mgr) {
@@ -1883,7 +1891,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
 
 	/* FIXME: Move this to a more suitable place */
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		intel_ddi_pll_init(dev);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a19ec06..003afb8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -206,6 +206,7 @@ struct intel_encoder {
 	struct drm_encoder base;
 
 	enum intel_output_type type;
+	enum port port;
 	unsigned int cloneable;
 	void (*hot_plug)(struct intel_encoder *);
 	bool (*compute_config)(struct intel_encoder *,
@@ -247,6 +248,8 @@ struct intel_encoder {
 	void (*suspend)(struct intel_encoder *);
 	int crtc_mask;
 	enum hpd_pin hpd_pin;
+	/* for communication with audio component; protected by av_mutex */
+	const struct drm_connector *audio_connector;
 };
 
 struct intel_panel {
@@ -362,6 +365,8 @@ struct intel_atomic_state {
 
 	/* Gen9+ only */
 	struct skl_wm_values wm_results;
+
+	struct i915_sw_fence commit_ready;
 };
 
 struct intel_plane_state {
@@ -398,9 +403,6 @@ struct intel_plane_state {
 	int scaler_id;
 
 	struct drm_intel_sprite_colorkey ckey;
-
-	/* async flip related structures */
-	struct drm_i915_gem_request *wait_req;
 };
 
 struct intel_initial_plane_config {
@@ -465,9 +467,13 @@ struct intel_pipe_wm {
 	bool sprites_scaled;
 };
 
-struct skl_pipe_wm {
+struct skl_plane_wm {
 	struct skl_wm_level wm[8];
 	struct skl_wm_level trans_wm;
+};
+
+struct skl_pipe_wm {
+	struct skl_plane_wm planes[I915_MAX_PLANES];
 	uint32_t linetime;
 };
 
@@ -493,14 +499,7 @@ struct intel_crtc_wm_state {
 		struct {
 			/* gen9+ only needs 1-step wm programming */
 			struct skl_pipe_wm optimal;
-
-			/* cached plane data rate */
-			unsigned plane_data_rate[I915_MAX_PLANES];
-			unsigned plane_y_data_rate[I915_MAX_PLANES];
-
-			/* minimum block allocation */
-			uint16_t minimum_blocks[I915_MAX_PLANES];
-			uint16_t minimum_y_blocks[I915_MAX_PLANES];
+			struct skl_ddb_entry ddb;
 		} skl;
 	};
 
@@ -723,13 +722,15 @@ struct intel_crtc {
 		/* watermarks currently being used  */
 		union {
 			struct intel_pipe_wm ilk;
-			struct skl_pipe_wm skl;
 		} active;
 
 		/* allow CxSR on this pipe */
 		bool cxsr_allowed;
 	} wm;
 
+	/* gen9+: ddb allocation currently being used */
+	struct skl_ddb_entry hw_ddb;
+
 	int scanline_offset;
 
 	struct {
@@ -796,22 +797,22 @@ struct intel_plane {
 };
 
 struct intel_watermark_params {
-	unsigned long fifo_size;
-	unsigned long max_wm;
-	unsigned long default_wm;
-	unsigned long guard_size;
-	unsigned long cacheline_size;
+	u16 fifo_size;
+	u16 max_wm;
+	u8 default_wm;
+	u8 guard_size;
+	u8 cacheline_size;
 };
 
 struct cxsr_latency {
-	int is_desktop;
-	int is_ddr3;
-	unsigned long fsb_freq;
-	unsigned long mem_freq;
-	unsigned long display_sr;
-	unsigned long display_hpll_disable;
-	unsigned long cursor_sr;
-	unsigned long cursor_hpll_disable;
+	bool is_desktop : 1;
+	bool is_ddr3 : 1;
+	u16 fsb_freq;
+	u16 mem_freq;
+	u16 display_sr;
+	u16 display_hpll_disable;
+	u16 cursor_sr;
+	u16 cursor_hpll_disable;
 };
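The hunk above shrinks two table-only structures by narrowing fields from unsigned long (8 bytes on LP64) to u16/u8 and single-bit bools; since these live in static lookup tables, the narrower types cut the table footprint severalfold. A quick userspace check of the saving for the latency table (field widths mirrored from the hunk, exact sizes platform-dependent):

	#include <stdint.h>
	#include <stdio.h>

	struct cxsr_latency_old {
		int is_desktop;
		int is_ddr3;
		unsigned long fsb_freq, mem_freq, display_sr,
			      display_hpll_disable, cursor_sr,
			      cursor_hpll_disable;
	};

	struct cxsr_latency_new {
		_Bool is_desktop : 1;
		_Bool is_ddr3 : 1;
		uint16_t fsb_freq, mem_freq, display_sr,
			 display_hpll_disable, cursor_sr,
			 cursor_hpll_disable;
	};

	int main(void)
	{
		/* typically 56 vs 14 bytes per entry on LP64 */
		printf("old=%zu new=%zu\n",
		       sizeof(struct cxsr_latency_old),
		       sizeof(struct cxsr_latency_new));
		return 0;
	}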
 
 #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@@ -872,6 +873,14 @@ enum link_m_n_set {
 	M2_N2
 };
 
+struct intel_dp_desc {
+	u8 oui[3];
+	u8 device_id[6];
+	u8 hw_rev;
+	u8 sw_major_rev;
+	u8 sw_minor_rev;
+} __packed;
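This 12-byte descriptor mirrors the DPCD device identification block, so the new intel_dp_read_desc() declared further down can presumably fill it with a single AUX transfer from DP_SINK_OUI (0x400) or, for branch devices, DP_BRANCH_OUI (0x500); both offsets appear in the intel_dp_probe_oui() code this series removes. A hedged sketch of that idea with a stubbed AUX reader (names illustrative, not the driver's implementation):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define DP_SINK_OUI	0x400	/* sink device identification */
	#define DP_BRANCH_OUI	0x500	/* branch device identification */

	struct dp_desc {
		uint8_t oui[3];
		uint8_t device_id[6];
		uint8_t hw_rev;
		uint8_t sw_major_rev;
		uint8_t sw_minor_rev;
	} __attribute__((packed));

	/* stub: pretend the AUX channel returned the requested bytes */
	static long aux_read(unsigned int offset, void *buf, size_t len)
	{
		(void)offset;
		memset(buf, 0, len);
		return (long)len;
	}

	static bool dp_read_desc(struct dp_desc *desc, bool is_branch)
	{
		unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;

		return aux_read(offset, desc, sizeof(*desc)) ==
		       (long)sizeof(*desc);
	}

	int main(void)
	{
		struct dp_desc desc;

		return dp_read_desc(&desc, false) ? 0 : 1;
	}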
+
 struct intel_dp {
 	i915_reg_t output_reg;
 	i915_reg_t aux_ch_ctl_reg;
@@ -894,6 +903,8 @@ struct intel_dp {
 	/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
 	uint8_t num_sink_rates;
 	int sink_rates[DP_MAX_SUPPORTED_RATES];
+	/* sink or branch descriptor */
+	struct intel_dp_desc desc;
 	struct drm_dp_aux aux;
 	uint8_t train_set[4];
 	int panel_power_up_delay;
@@ -950,17 +961,22 @@ struct intel_dp {
 	bool compliance_test_active;
 };
 
+struct intel_lspcon {
+	bool active;
+	enum drm_lspcon_mode mode;
+	bool desc_valid;
+};
+
 struct intel_digital_port {
 	struct intel_encoder base;
 	enum port port;
 	u32 saved_port_bits;
 	struct intel_dp dp;
 	struct intel_hdmi hdmi;
+	struct intel_lspcon lspcon;
 	enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
 	bool release_cl2_override;
 	uint8_t max_lanes;
-	/* for communication with audio component; protected by av_mutex */
-	const struct drm_connector *audio_connector;
 };
 
 struct intel_dp_mst_encoder {
@@ -1012,17 +1028,15 @@ vlv_pipe_to_channel(enum pipe pipe)
 	}
 }
 
-static inline struct drm_crtc *
-intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+static inline struct intel_crtc *
+intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	return dev_priv->pipe_to_crtc_mapping[pipe];
 }
 
-static inline struct drm_crtc *
-intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+static inline struct intel_crtc *
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	return dev_priv->plane_to_crtc_mapping[plane];
 }
 
@@ -1082,15 +1096,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
-/*
- * Returns the number of planes for this pipe, ie the number of sprites + 1
- * (primary plane). This doesn't count the cursor plane then.
- */
-static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
-{
-	return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
-}
-
 /* intel_fifo_underrun.c */
 bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 					   enum pipe pipe, bool enable);
@@ -1107,6 +1112,9 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
 /* i915_irq.c */
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1129,6 +1137,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 				     unsigned int pipe_mask);
 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
 				     unsigned int pipe_mask);
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
 
 /* intel_crt.c */
 void intel_crt_init(struct drm_device *dev);
@@ -1176,12 +1187,15 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
 
 /* intel_audio.c */
 void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *crtc_state,
+			      const struct drm_connector_state *conn_state);
 void intel_audio_codec_disable(struct intel_encoder *encoder);
 void i915_audio_component_init(struct drm_i915_private *dev_priv);
 void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
 
 /* intel_display.c */
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
 void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
 void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
@@ -1230,18 +1244,17 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
 		 (1 << INTEL_OUTPUT_EDP));
 }
 static inline void
-intel_wait_for_vblank(struct drm_device *dev, int pipe)
+intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	drm_wait_one_vblank(dev, pipe);
+	drm_wait_one_vblank(&dev_priv->drm, pipe);
 }
 static inline void
-intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
 {
-	const struct intel_crtc *crtc =
-		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
 	if (crtc->active)
-		intel_wait_for_vblank(dev, pipe);
+		intel_wait_for_vblank(dev_priv, pipe);
 }
 
 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
@@ -1285,21 +1298,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
 			       uint64_t fb_modifier, unsigned int cpp);
 
-static inline bool
-intel_rotation_90_or_270(unsigned int rotation)
-{
-	return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
-}
-
-void intel_create_rotation_property(struct drm_device *dev,
-					struct intel_plane *plane);
-
 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
 				    enum pipe pipe);
 
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
 		     const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
 int lpt_get_iclkip(struct drm_i915_private *dev_priv);
 
 /* modesetting asserts */
@@ -1327,12 +1331,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void bxt_init_cdclk(struct drm_i915_private *dev_priv);
 void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
-			    enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
-			      enum dpio_phy phy);
 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
@@ -1350,7 +1348,7 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 			struct dpll *best_clock);
 int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
 
-bool intel_crtc_active(struct drm_crtc *crtc);
+bool intel_crtc_active(struct intel_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 enum intel_display_power_domain
@@ -1443,6 +1441,11 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 	return ~((1 << lane_count) - 1) & 0xf;
 }
 
+bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+bool __intel_dp_read_desc(struct intel_dp *intel_dp,
+			  struct intel_dp_desc *desc);
+bool intel_dp_read_desc(struct intel_dp *intel_dp);
+
 /* intel_dp_aux_backlight.c */
 int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
 
@@ -1487,6 +1490,10 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
 {
 }
 
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
 static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 {
 }
@@ -1513,6 +1520,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
 		     unsigned int frontbuffer_bits, enum fb_op_origin origin);
 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
 
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
@@ -1642,23 +1650,6 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
 		DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
 }
 
-static inline int
-assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
-{
-	int seq = atomic_read(&dev_priv->pm.atomic_seq);
-
-	assert_rpm_wakelock_held(dev_priv);
-
-	return seq;
-}
-
-static inline void
-assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
-{
-	WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
-		  "HW access outside of RPM atomic section\n");
-}
-
 /**
  * disable_rpm_wakeref_asserts - disable the RPM assert checks
  * @dev_priv: i915 device instance
@@ -1714,11 +1705,11 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 
 
 /* intel_pm.c */
-void intel_init_clock_gating(struct drm_device *dev);
-void intel_suspend_hw(struct drm_device *dev);
-int ilk_wm_max_level(const struct drm_device *dev);
-void intel_update_watermarks(struct drm_crtc *crtc);
-void intel_init_pm(struct drm_device *dev);
+void intel_init_clock_gating(struct drm_i915_private *dev_priv);
+void intel_suspend_hw(struct drm_i915_private *dev_priv);
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
+void intel_update_watermarks(struct intel_crtc *crtc);
+void intel_init_pm(struct drm_i915_private *dev_priv);
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
 void intel_pm_setup(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1742,20 +1733,24 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+			      struct skl_pipe_wm *out);
 bool intel_can_enable_sagv(struct drm_atomic_state *state);
 int intel_enable_sagv(struct drm_i915_private *dev_priv);
 int intel_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+			 const struct skl_wm_level *l2);
 bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
 			       const struct skl_ddb_allocation *new,
 			       enum pipe pipe);
 bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
-				 const struct skl_ddb_allocation *old,
-				 const struct skl_ddb_allocation *new,
-				 enum pipe pipe);
+				 struct intel_crtc *intel_crtc);
 void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
-			 const struct skl_wm_values *wm);
+			 const struct skl_plane_wm *wm,
+			 const struct skl_ddb_allocation *ddb);
 void skl_write_plane_wm(struct intel_crtc *intel_crtc,
-			const struct skl_wm_values *wm,
+			const struct skl_plane_wm *wm,
+			const struct skl_ddb_allocation *ddb,
 			int plane);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
@@ -1773,7 +1768,8 @@ bool intel_sdvo_init(struct drm_device *dev,
 /* intel_sprite.c */
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			     int usecs);
-int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+					      enum pipe pipe, int plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 void intel_pipe_update_start(struct intel_crtc *crtc);
@@ -1835,4 +1831,7 @@ int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
 void intel_color_set_csc(struct drm_crtc_state *crtc_state);
 void intel_color_load_luts(struct drm_crtc_state *crtc_state);
 
+/* intel_lspcon.c */
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b2e3d3a..4e0d025 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -437,11 +437,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
 
 static void intel_dsi_device_ready(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_dsi_device_ready(encoder);
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		bxt_dsi_device_ready(encoder);
 }
 
@@ -464,7 +464,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
 	}
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+		i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
 		u32 temp;
 
@@ -494,7 +494,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
 	enum port port;
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+		i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
 		u32 temp;
 
@@ -656,7 +656,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
 
 static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	enum port port;
@@ -664,7 +663,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 	DRM_DEBUG_KMS("\n");
 	for_each_dsi_port(port, intel_dsi->ports) {
 		/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
-		i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+		i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
 		u32 val;
 
@@ -741,7 +740,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	struct drm_device *dev = encoder->base.dev;
 	enum intel_display_power_domain power_domain;
 	enum port port;
 	bool active = false;
@@ -762,7 +760,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 
 	/* XXX: this only works for one DSI output */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
+		i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
 		bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
 
@@ -771,7 +769,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 		 * bit in port C control register does not get set. As a
 		 * workaround, check pipe B conf instead.
 		 */
-		if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
+		if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+		    port == PORT_C)
 			enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
 
 		/* Try command mode if video mode not enabled */
@@ -970,11 +969,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 static void intel_dsi_get_config(struct intel_encoder *encoder,
 				 struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 pclk;
 	DRM_DEBUG_KMS("\n");
 
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(dev_priv))
 		bxt_dsi_get_pipe_config(encoder, pipe_config);
 
 	pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@ -1066,7 +1065,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
 	hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (IS_BROXTON(dev)) {
+		if (IS_BROXTON(dev_priv)) {
 			/*
 			 * Program hdisplay and vdisplay on MIPI transcoder.
 			 * This is different from calculated hactive and
@@ -1138,7 +1137,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 	}
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 			/*
 			 * escape clock divider, 20MHz, shared for A and C.
 			 * device ready must be off when doing this! txclkesc?
@@ -1153,7 +1152,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 			tmp &= ~READ_REQUEST_PRIORITY_MASK;
 			I915_WRITE(MIPI_CTRL(port), tmp |
 					READ_REQUEST_PRIORITY_HIGH);
-		} else if (IS_BROXTON(dev)) {
+		} else if (IS_BROXTON(dev_priv)) {
 			enum pipe pipe = intel_crtc->pipe;
 
 			tmp = I915_READ(MIPI_CTRL(port));
@@ -1242,7 +1241,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 		I915_WRITE(MIPI_INIT_COUNT(port),
 				txclkesc(intel_dsi->escape_clk_div, 100));
 
-		if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
+		if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
 			/*
 			 * BXT spec says write MIPI_INIT_COUNT for
 			 * both the ports, even if only one is
@@ -1346,7 +1345,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
 			DRM_DEBUG_KMS("no scaling not supported\n");
 			return -EINVAL;
 		}
-		if (HAS_GMCH_DISPLAY(dev) &&
+		if (HAS_GMCH_DISPLAY(to_i915(dev)) &&
 		    val == DRM_MODE_SCALE_CENTER) {
 			DRM_DEBUG_KMS("centering not supported\n");
 			return -EINVAL;
@@ -1450,9 +1449,9 @@ void intel_dsi_init(struct drm_device *dev)
 	if (!intel_bios_is_dsi_present(dev_priv, &port))
 		return;
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
 	} else {
 		DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1488,6 +1487,7 @@ void intel_dsi_init(struct drm_device *dev)
 
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
+	intel_encoder->port = port;
 	/*
 	 * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
 	 * port C. BXT isn't limited like this.
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index cd154ce..9f279a3 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -126,6 +126,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 	u16 len;
 	enum port port;
 
+	DRM_DEBUG_KMS("\n");
+
 	flags = *data++;
 	type = *data++;
 
@@ -199,6 +201,8 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
 {
 	u32 delay = *((const u32 *) data);
 
+	DRM_DEBUG_KMS("\n");
+
 	usleep_range(delay, delay + 10);
 	data += 4;
 
@@ -307,6 +311,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	u8 gpio_source, gpio_index;
 	bool value;
 
+	DRM_DEBUG_KMS("\n");
+
 	if (dev_priv->vbt.dsi.seq_version >= 3)
 		data++;
 
@@ -331,18 +337,36 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	return data;
 }
 
-static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
 {
+	DRM_DEBUG_KMS("Skipping I2C element execution\n");
+
 	return data + *(data + 6) + 7;
 }
 
+static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
+{
+	DRM_DEBUG_KMS("Skipping SPI element execution\n");
+
+	return data + *(data + 5) + 6;
+}
+
+static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
+{
+	DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+
+	return data + 15;
+}
+
 typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
 					const u8 *data);
 static const fn_mipi_elem_exec exec_elem[] = {
 	[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
 	[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
 	[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
-	[MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
+	[MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c,
+	[MIPI_SEQ_ELEM_SPI] = mipi_exec_spi,
+	[MIPI_SEQ_ELEM_PMIC] = mipi_exec_pmic,
 };
 
 /*
@@ -385,11 +409,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
 		return;
 
 	data = dev_priv->vbt.dsi.sequence[seq_id];
-	if (!data) {
-		DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
-			      seq_id, sequence_name(seq_id));
+	if (!data)
 		return;
-	}
 
 	WARN_ON(*data != seq_id);
 
@@ -420,7 +441,15 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
 			operation_size = *data++;
 
 		if (mipi_elem_exec) {
+			const u8 *next = data + operation_size;
+
 			data = mipi_elem_exec(intel_dsi, data);
+
+			/* Consistency check if we have a size. */
+			if (operation_size && data != next) {
+				DRM_ERROR("Inconsistent operation size\n");
+				return;
+			}
 		} else if (operation_size) {
 			/* We have size, skip. */
 			DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
@@ -438,6 +467,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
 static int vbt_panel_prepare(struct drm_panel *panel)
 {
 	generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+	generic_exec_sequence(panel, MIPI_SEQ_POWER_ON);
+	generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
 	generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
 
 	return 0;
@@ -445,7 +476,8 @@ static int vbt_panel_prepare(struct drm_panel *panel)
 
 static int vbt_panel_unprepare(struct drm_panel *panel)
 {
-	generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
+	generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+	generic_exec_sequence(panel, MIPI_SEQ_POWER_OFF);
 
 	return 0;
 }
@@ -453,12 +485,14 @@ static int vbt_panel_unprepare(struct drm_panel *panel)
 static int vbt_panel_enable(struct drm_panel *panel)
 {
 	generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
+	generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_ON);
 
 	return 0;
 }
 
 static int vbt_panel_disable(struct drm_panel *panel)
 {
+	generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_OFF);
 	generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
 
 	return 0;
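
A note on the consistency check added to generic_exec_sequence() above: it relies on the invariant that, whenever the VBT stream carries an explicit operation size, the element handler returns a cursor advanced by exactly that many bytes. A minimal, standalone sketch of the pattern (hypothetical names; not the driver code):

	#include <stdint.h>
	#include <stdio.h>

	typedef const uint8_t *(*elem_exec_fn)(const uint8_t *data);

	/* Example handler: consumes a fixed 4-byte payload. */
	static const uint8_t *exec_delay(const uint8_t *data)
	{
		return data + 4;
	}

	static int exec_one(elem_exec_fn fn, const uint8_t *data,
			    uint8_t operation_size)
	{
		const uint8_t *next = data + operation_size;

		data = fn(data);

		/* A mismatch means the handler and the stream disagree
		 * about the element layout, so stop before misparsing. */
		if (operation_size && data != next) {
			fprintf(stderr, "Inconsistent operation size\n");
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		static const uint8_t payload[4] = { 0x10, 0x27, 0x00, 0x00 };

		return exec_one(exec_delay, payload, sizeof(payload)) ? 1 : 0;
	}
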
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 6ab58a0..56eff60 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -351,7 +351,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 		       struct intel_crtc_state *config)
 {
-	if (IS_BROXTON(encoder->base.dev))
+	if (IS_BROXTON(to_i915(encoder->base.dev)))
 		return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
 	else
 		return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@@ -515,11 +515,11 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
 int intel_compute_dsi_pll(struct intel_encoder *encoder,
 			  struct intel_crtc_state *config)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return vlv_compute_dsi_pll(encoder, config);
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		return bxt_compute_dsi_pll(encoder, config);
 
 	return -ENODEV;
@@ -528,21 +528,21 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
 void intel_enable_dsi_pll(struct intel_encoder *encoder,
 			  const struct intel_crtc_state *config)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_enable_dsi_pll(encoder, config);
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		bxt_enable_dsi_pll(encoder, config);
 }
 
 void intel_disable_dsi_pll(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_disable_dsi_pll(encoder);
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		bxt_disable_dsi_pll(encoder);
 }
 
@@ -564,10 +564,10 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 
 void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(dev_priv))
 		bxt_dsi_reset_clocks(encoder, port);
-	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_dsi_reset_clocks(encoder, port);
 }
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 2e452c5..7086454 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,12 +393,12 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
 	 * its timings to get how the BIOS set up the panel.
 	 */
 	if (dvo_val & DVO_ENABLE) {
-		struct drm_crtc *crtc;
+		struct intel_crtc *crtc;
 		int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
 
-		crtc = intel_get_crtc_for_pipe(dev, pipe);
+		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 		if (crtc) {
-			mode = intel_crtc_mode_get(dev, crtc);
+			mode = intel_crtc_mode_get(dev, &crtc->base);
 			if (mode) {
 				mode->type |= DRM_MODE_TYPE_PREFERRED;
 				if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -412,16 +412,14 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
 	return mode;
 }
 
-static char intel_dvo_port_name(i915_reg_t dvo_reg)
+static enum port intel_dvo_port(i915_reg_t dvo_reg)
 {
 	if (i915_mmio_reg_equal(dvo_reg, DVOA))
-		return 'A';
+		return PORT_A;
 	else if (i915_mmio_reg_equal(dvo_reg, DVOB))
-		return 'B';
-	else if (i915_mmio_reg_equal(dvo_reg, DVOC))
-		return 'C';
+		return PORT_B;
 	else
-		return '?';
+		return PORT_C;
 }
 
 void intel_dvo_init(struct drm_device *dev)
@@ -464,6 +462,7 @@ void intel_dvo_init(struct drm_device *dev)
 		bool dvoinit;
 		enum pipe pipe;
 		uint32_t dpll[I915_MAX_PIPES];
+		enum port port;
 
 		/* Allow the I2C driver info to specify the GPIO to be used in
 		 * special cases, but otherwise default to what's defined
@@ -511,12 +510,15 @@ void intel_dvo_init(struct drm_device *dev)
 		if (!dvoinit)
 			continue;
 
+		port = intel_dvo_port(dvo->dvo_reg);
 		drm_encoder_init(dev, &intel_encoder->base,
 				 &intel_dvo_enc_funcs, encoder_type,
-				 "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+				 "DVO %c", port_name(port));
 
 		intel_encoder->type = INTEL_OUTPUT_DVO;
+		intel_encoder->port = port;
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+
 		switch (dvo->type) {
 		case INTEL_DVO_CHIP_TMDS:
 			intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 025e232..841f8d1 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -82,12 +82,17 @@ static const struct engine_info {
 	},
 };
 
-static struct intel_engine_cs *
+static int
 intel_engine_setup(struct drm_i915_private *dev_priv,
 		   enum intel_engine_id id)
 {
 	const struct engine_info *info = &intel_engines[id];
-	struct intel_engine_cs *engine = &dev_priv->engine[id];
+	struct intel_engine_cs *engine;
+
+	GEM_BUG_ON(dev_priv->engine[id]);
+	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+	if (!engine)
+		return -ENOMEM;
 
 	engine->id = id;
 	engine->i915 = dev_priv;
@@ -97,7 +102,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	engine->mmio_base = info->mmio_base;
 	engine->irq_shift = info->irq_shift;
 
-	return engine;
+	dev_priv->engine[id] = engine;
+	return 0;
 }
 
 /**
@@ -110,13 +116,16 @@ int intel_engines_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+	unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
 	unsigned int mask = 0;
 	int (*init)(struct intel_engine_cs *engine);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	unsigned int i;
 	int ret;
 
-	WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
-	WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+	WARN_ON(ring_mask == 0);
+	WARN_ON(ring_mask &
 		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
 
 	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
@@ -131,7 +140,11 @@ int intel_engines_init(struct drm_device *dev)
 		if (!init)
 			continue;
 
-		ret = init(intel_engine_setup(dev_priv, i));
+		ret = intel_engine_setup(dev_priv, i);
+		if (ret)
+			goto cleanup;
+
+		ret = init(dev_priv->engine[i]);
 		if (ret)
 			goto cleanup;
 
@@ -143,7 +156,7 @@ int intel_engines_init(struct drm_device *dev)
 	 * are added to the driver by a warning and disabling the forgotten
 	 * engines.
 	 */
-	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+	if (WARN_ON(mask != ring_mask))
 		device_info->ring_mask = mask;
 
 	device_info->num_rings = hweight32(mask);
@@ -151,17 +164,17 @@ int intel_engines_init(struct drm_device *dev)
 	return 0;
 
 cleanup:
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, dev_priv, id) {
 		if (i915.enable_execlists)
-			intel_logical_ring_cleanup(&dev_priv->engine[i]);
+			intel_logical_ring_cleanup(engine);
 		else
-			intel_engine_cleanup(&dev_priv->engine[i]);
+			intel_engine_cleanup(engine);
 	}
 
 	return ret;
 }
 
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
@@ -191,13 +204,13 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
 		kunmap(page);
 	}
-	memset(engine->semaphore.sync_seqno, 0,
-	       sizeof(engine->semaphore.sync_seqno));
 
 	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 	if (engine->irq_seqno_barrier)
 		engine->irq_seqno_barrier(engine);
-	engine->last_submitted_seqno = seqno;
+
+	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+	engine->timeline->last_submitted_seqno = seqno;
 
 	engine->hangcheck.seqno = seqno;
 
@@ -207,15 +220,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 	intel_engine_wakeup(engine);
 }
 
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+static void intel_engine_init_timeline(struct intel_engine_cs *engine)
 {
-	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-}
-
-static void intel_engine_init_requests(struct intel_engine_cs *engine)
-{
-	init_request_active(&engine->last_request, NULL);
-	INIT_LIST_HEAD(&engine->request_list);
+	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
 }
 
 /**
@@ -232,9 +239,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	INIT_LIST_HEAD(&engine->execlist_queue);
 	spin_lock_init(&engine->execlist_lock);
 
-	engine->fence_context = fence_context_alloc(1);
-
-	intel_engine_init_requests(engine);
+	intel_engine_init_timeline(engine);
 	intel_engine_init_hangcheck(engine);
 	i915_gem_batch_pool_init(engine, &engine->batch_pool);
 
@@ -251,7 +256,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
 
 	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
 	if (!obj)
-		obj = i915_gem_object_create(&engine->i915->drm, size);
+		obj = i915_gem_object_create_internal(engine->i915, size);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("Failed to allocate scratch page\n");
 		return PTR_ERR(obj);
@@ -301,6 +306,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
+	ret = i915_gem_render_state_init(engine);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
@@ -315,7 +324,142 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
 	intel_engine_cleanup_scratch(engine);
 
+	i915_gem_render_state_fini(engine);
 	intel_engine_fini_breadcrumbs(engine);
 	intel_engine_cleanup_cmd_parser(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);
 }
+
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	u64 acthd;
+
+	if (INTEL_GEN(dev_priv) >= 8)
+		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+					 RING_ACTHD_UDW(engine->mmio_base));
+	else if (INTEL_GEN(dev_priv) >= 4)
+		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+	else
+		acthd = I915_READ(ACTHD);
+
+	return acthd;
+}
+
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	u64 bbaddr;
+
+	if (INTEL_GEN(dev_priv) >= 8)
+		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
+					  RING_BBADDR_UDW(engine->mmio_base));
+	else
+		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+
+	return bbaddr;
+}
+
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+	switch (type) {
+	case I915_CACHE_NONE: return " uncached";
+	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+	case I915_CACHE_L3_LLC: return " L3+LLC";
+	case I915_CACHE_WT: return " WT";
+	default: return "";
+	}
+}
+
+static inline uint32_t
+read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
+		  int subslice, i915_reg_t reg)
+{
+	uint32_t mcr;
+	uint32_t ret;
+	enum forcewake_domains fw_domains;
+
+	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+						    FW_REG_READ);
+	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+						     GEN8_MCR_SELECTOR,
+						     FW_REG_READ | FW_REG_WRITE);
+
+	spin_lock_irq(&dev_priv->uncore.lock);
+	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
+	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+	/*
+	 * The HW expects the slice and subslice selectors to be reset to 0
+	 * after reading out the registers.
+	 */
+	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
+	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+	ret = I915_READ_FW(reg);
+
+	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+	spin_unlock_irq(&dev_priv->uncore.lock);
+
+	return ret;
+}
+
+/* NB: please notice the memset */
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+			       struct intel_instdone *instdone)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	u32 mmio_base = engine->mmio_base;
+	int slice;
+	int subslice;
+
+	memset(instdone, 0, sizeof(*instdone));
+
+	switch (INTEL_GEN(dev_priv)) {
+	default:
+		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+		if (engine->id != RCS)
+			break;
+
+		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+			instdone->sampler[slice][subslice] =
+				read_subslice_reg(dev_priv, slice, subslice,
+						  GEN7_SAMPLER_INSTDONE);
+			instdone->row[slice][subslice] =
+				read_subslice_reg(dev_priv, slice, subslice,
+						  GEN7_ROW_INSTDONE);
+		}
+		break;
+	case 7:
+		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+		if (engine->id != RCS)
+			break;
+
+		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
+		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+
+		break;
+	case 6:
+	case 5:
+	case 4:
+		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+		if (engine->id == RCS)
+			/* HACK: Using the wrong struct member */
+			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+		break;
+	case 3:
+	case 2:
+		instdone->instdone = I915_READ(GEN2_INSTDONE);
+		break;
+	}
+}
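
intel_engine_setup() now allocates each engine separately and publishes it through the dev_priv->engine[] pointer table, so the failure path (for_each_engine) only visits engines that were actually created. The same allocate-then-publish pattern, sketched standalone with hypothetical types:

	#include <stdlib.h>

	#define NUM_ENGINES 5

	struct engine {
		int id;
	};

	static struct engine *engines[NUM_ENGINES];

	static int setup_all(void)
	{
		int i;

		for (i = 0; i < NUM_ENGINES; i++) {
			/* Zeroed allocation, mirroring kzalloc(..., GFP_KERNEL). */
			struct engine *e = calloc(1, sizeof(*e));

			if (!e)
				goto cleanup;

			e->id = i;
			engines[i] = e;	/* publish only fully set-up engines */
		}
		return 0;

	cleanup:
		/* Unwind: entries never assigned are still NULL, and
		 * free(NULL) is a no-op, so a full sweep is safe. */
		for (i = 0; i < NUM_ENGINES; i++) {
			free(engines[i]);
			engines[i] = NULL;
		}
		return -1;
	}

	int main(void)
	{
		return setup_all() ? 1 : 0;
	}
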
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index faa6762..e230d48 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
 {
 	int w, h;
 
-	if (intel_rotation_90_or_270(cache->plane.rotation)) {
+	if (drm_rotation_90_or_270(cache->plane.rotation)) {
 		w = cache->plane.src_h;
 		h = cache->plane.src_w;
 	} else {
@@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
 	int lines;
 
 	intel_fbc_get_plane_source_size(cache, NULL, &lines);
-	if (INTEL_INFO(dev_priv)->gen >= 7)
+	if (INTEL_GEN(dev_priv) == 7)
 		lines = min(lines, 2048);
+	else if (INTEL_GEN(dev_priv) >= 8)
+		lines = min(lines, 2560);
 
 	/* Hardware needs the full buffer stride, not just the active area. */
 	return lines * cache->fb.stride;
@@ -774,6 +776,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
+	/* We don't need to use a state cache here since this information is
+	 * global for all CRTCs.
+	 */
+	if (fbc->underrun_detected) {
+		fbc->no_fbc_reason = "underrun detected";
+		return false;
+	}
+
 	if (!cache->plane.visible) {
 		fbc->no_fbc_reason = "primary plane not visible";
 		return false;
@@ -859,6 +869,11 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 		return false;
 	}
 
+	if (fbc->underrun_detected) {
+		fbc->no_fbc_reason = "underrun detected";
+		return false;
+	}
+
 	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
 		fbc->no_fbc_reason = "no enabled pipes can have FBC";
 		return false;
@@ -1221,6 +1236,59 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
 	cancel_work_sync(&fbc->work.work);
 }
 
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, fbc.underrun_work);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	mutex_lock(&fbc->lock);
+
+	/* Maybe we were scheduled twice. */
+	if (fbc->underrun_detected)
+		goto out;
+
+	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+	fbc->underrun_detected = true;
+
+	intel_fbc_deactivate(dev_priv);
+out:
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @dev_priv: i915 device instance
+ *
+ * Without FBC, most underruns are harmless and don't cause problems beyond
+ * an annoying message in dmesg. With FBC, underruns can become black screens
+ * or even worse, especially when paired with bad watermarks. So to be on the
+ * safe side, completely disable FBC whenever we detect a FIFO underrun on any
+ * pipe: an underrun on any pipe already suggests that the watermarks may be
+ * bad, so err on the side of caution.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (!fbc_supported(dev_priv))
+		return;
+
+	/* There's no guarantee that underrun_detected won't be set to true
+	 * right after this check and before the work is scheduled, but that's
+	 * not a problem since we'll check it again in the work function
+	 * while FBC is locked. This check here is just to prevent us from
+	 * unnecessarily scheduling the work, and it relies on the fact that we
+	 * never switch underrun_detected back to false after it's true. */
+	if (READ_ONCE(fbc->underrun_detected))
+		return;
+
+	schedule_work(&fbc->underrun_work);
+}
+
 /**
  * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
  * @dev_priv: i915 device instance
@@ -1238,7 +1306,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
 		return;
 
 	for_each_intel_crtc(&dev_priv->drm, crtc)
-		if (intel_crtc_active(&crtc->base) &&
+		if (intel_crtc_active(crtc) &&
 		    to_intel_plane_state(crtc->base.primary->state)->base.visible)
 			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
 }
@@ -1292,6 +1360,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 	enum pipe pipe;
 
 	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
+	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
 	mutex_init(&fbc->lock);
 	fbc->enabled = false;
 	fbc->active = false;
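
intel_fbc_handle_fifo_underrun_irq() is a check-then-defer construction: a cheap lockless read in IRQ context avoids pointlessly scheduling the work, and the work function re-checks the flag under the mutex. This is only sound because underrun_detected never transitions back from true to false. A userspace sketch of the idea, with pthreads and C11 atomics standing in for the kernel mutex and workqueue (an analogy, not kernel API):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	static pthread_mutex_t fbc_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool underrun_detected;	/* only ever goes false -> true */

	/* Slow path: the deferred work re-checks under the lock, because the
	 * lockless fast-path test can race with another schedule of the work. */
	static void underrun_work_fn(void)
	{
		pthread_mutex_lock(&fbc_lock);
		if (!underrun_detected) {
			underrun_detected = true;
			/* ... deactivate FBC here ... */
		}
		pthread_mutex_unlock(&fbc_lock);
	}

	/* Fast path: runs in "IRQ" context, so it must stay cheap and
	 * lock-free. A stale false only means the work gets scheduled once
	 * more, which the slow path tolerates; since the flag never returns
	 * to false, a true reading is final and the disable is never missed. */
	static bool underrun_irq_should_schedule(void)
	{
		return !atomic_load_explicit(&underrun_detected,
					     memory_order_relaxed);
	}

	int main(void)
	{
		if (underrun_irq_should_schedule())
			underrun_work_fn();	/* stands in for schedule_work() */
		return 0;
	}
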
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 2aa7440..e660d8b 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -57,7 +57,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
 	assert_spin_locked(&dev_priv->irq_lock);
 
 	for_each_pipe(dev_priv, pipe) {
-		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
 		if (crtc->cpu_fifo_underrun_disabled)
 			return false;
@@ -75,7 +75,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
 	assert_spin_locked(&dev_priv->irq_lock);
 
 	for_each_pipe(dev_priv, pipe) {
-		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
 		if (crtc->pch_fifo_underrun_disabled)
 			return false;
@@ -245,22 +245,21 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 						    enum pipe pipe, bool enable)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 	bool old;
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	old = !intel_crtc->cpu_fifo_underrun_disabled;
-	intel_crtc->cpu_fifo_underrun_disabled = !enable;
+	old = !crtc->cpu_fifo_underrun_disabled;
+	crtc->cpu_fifo_underrun_disabled = !enable;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(dev_priv))
 		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
-	else if (IS_GEN5(dev) || IS_GEN6(dev))
+	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
-	else if (IS_GEN7(dev))
+	else if (IS_GEN7(dev_priv))
 		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
-	else if (IS_GEN8(dev) || IS_GEN9(dev))
+	else if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
 		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
 	return old;
@@ -314,8 +313,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 					   enum transcoder pch_transcoder,
 					   bool enable)
 {
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc =
+		intel_get_crtc_for_pipe(dev_priv, (enum pipe) pch_transcoder);
 	unsigned long flags;
 	bool old;
 
@@ -330,8 +329,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
-	old = !intel_crtc->pch_fifo_underrun_disabled;
-	intel_crtc->pch_fifo_underrun_disabled = !enable;
+	old = !crtc->pch_fifo_underrun_disabled;
+	crtc->pch_fifo_underrun_disabled = !enable;
 
 	if (HAS_PCH_IBX(dev_priv))
 		ibx_set_fifo_underrun_reporting(&dev_priv->drm,
@@ -358,7 +357,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 					 enum pipe pipe)
 {
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
 	/* We may be called too early in init, thanks BIOS! */
 	if (crtc == NULL)
@@ -366,12 +365,14 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 
 	/* GMCH can't disable fifo underruns, filter them. */
 	if (HAS_GMCH_DISPLAY(dev_priv) &&
-	    to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
+	    crtc->cpu_fifo_underrun_disabled)
 		return;
 
 	if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
 		DRM_ERROR("CPU pipe %c FIFO underrun\n",
 			  pipe_name(pipe));
+
+	intel_fbc_handle_fifo_underrun_irq(dev_priv);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 5cdf7aa..0053258 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -64,7 +64,7 @@ struct drm_i915_gem_request;
  */
 struct i915_guc_client {
 	struct i915_vma *vma;
-	void *client_base;		/* first page (only) of above	*/
+	void *vaddr;
 	struct i915_gem_context *owner;
 	struct intel_guc *guc;
 
@@ -123,10 +123,28 @@ struct intel_guc_fw {
 	uint32_t ucode_offset;
 };
 
+struct intel_guc_log {
+	uint32_t flags;
+	struct i915_vma *vma;
+	void *buf_addr;
+	struct workqueue_struct *flush_wq;
+	struct work_struct flush_work;
+	struct rchan *relay_chan;
+
+	/* logging related stats */
+	u32 capture_miss_count;
+	u32 flush_interrupt_count;
+	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
+	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
+	u32 flush_count[GUC_MAX_LOG_BUFFER];
+};
+
 struct intel_guc {
 	struct intel_guc_fw guc_fw;
-	uint32_t log_flags;
-	struct i915_vma *log_vma;
+	struct intel_guc_log log;
+
+	/* GuC2Host interrupt related state */
+	bool interrupts_enabled;
 
 	struct i915_vma *ads_vma;
 	struct i915_vma *ctx_pool_vma;
@@ -146,6 +164,9 @@ struct intel_guc {
 
 	uint64_t submissions[I915_NUM_ENGINES];
 	uint32_t last_seqno[I915_NUM_ENGINES];
+
+	/* To serialize the Host2GuC actions */
+	struct mutex action_lock;
 };
 
 /* intel_guc_loader.c */
@@ -163,5 +184,10 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
 void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
+void i915_guc_register(struct drm_i915_private *dev_priv);
+void i915_guc_unregister(struct drm_i915_private *dev_priv);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index e40db2d..324ea90 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -104,9 +104,9 @@
 #define   GUC_LOG_ALLOC_IN_MEGABYTE	(1 << 3)
 #define   GUC_LOG_CRASH_PAGES		1
 #define   GUC_LOG_CRASH_SHIFT		4
-#define   GUC_LOG_DPC_PAGES		3
+#define   GUC_LOG_DPC_PAGES		7
 #define   GUC_LOG_DPC_SHIFT		6
-#define   GUC_LOG_ISR_PAGES		3
+#define   GUC_LOG_ISR_PAGES		7
 #define   GUC_LOG_ISR_SHIFT		9
 #define   GUC_LOG_BUF_ADDR_SHIFT	12
 
@@ -419,15 +419,87 @@ struct guc_ads {
 	u32 reserved2[4];
 } __packed;
 
+/* GuC logging structures */
+
+enum guc_log_buffer_type {
+	GUC_ISR_LOG_BUFFER,
+	GUC_DPC_LOG_BUFFER,
+	GUC_CRASH_DUMP_LOG_BUFFER,
+	GUC_MAX_LOG_BUFFER
+};
+
+/**
+ * DOC: GuC Log buffer Layout
+ *
+ * Page0  +-------------------------------+
+ *        |   ISR state header (32 bytes) |
+ *        |      DPC state header         |
+ *        |   Crash dump state header     |
+ * Page1  +-------------------------------+
+ *        |           ISR logs            |
+ * Page9  +-------------------------------+
+ *        |           DPC logs            |
+ * Page17 +-------------------------------+
+ *        |         Crash Dump logs       |
+ *        +-------------------------------+
+ *
+ * The state structure below is used to coordinate retrieval of the GuC
+ * firmware logs. Separate state is maintained for each log buffer type.
+ * read_ptr points to the location in the log buffer that i915 last read
+ * from and is read-only for the GuC firmware. write_ptr is incremented by
+ * GuC by the number of bytes written for each log entry and is read-only
+ * for i915. When any log buffer type becomes half full, GuC sends a flush
+ * interrupt. The GuC firmware expects that while it writes to the second
+ * half of the buffer, the first half is consumed by the Host, followed by
+ * a flush-completed acknowledgment, so that GuC never overwrites unread
+ * data and loses logs. So when the buffer gets half filled and i915 has
+ * requested the interrupt, GuC sets the flush_to_file field, sets
+ * sampled_write_ptr to the value of write_ptr and raises the interrupt.
+ * On receiving the interrupt, i915 should read the buffer, clear the
+ * flush_to_file field and update read_ptr with the value of
+ * sampled_write_ptr before sending the acknowledgment to GuC. The marker
+ * and version fields are for internal use by GuC and opaque to i915.
+ * buffer_full_cnt is incremented every time GuC detects a log buffer overflow.
+ */
+struct guc_log_buffer_state {
+	u32 marker[2];
+	u32 read_ptr;
+	u32 write_ptr;
+	u32 size;
+	u32 sampled_write_ptr;
+	union {
+		struct {
+			u32 flush_to_file:1;
+			u32 buffer_full_cnt:4;
+			u32 reserved:27;
+		};
+		u32 flags;
+	};
+	u32 version;
+} __packed;
+
+union guc_log_control {
+	struct {
+		u32 logging_enabled:1;
+		u32 reserved1:3;
+		u32 verbosity:4;
+		u32 reserved2:24;
+	};
+	u32 value;
+} __packed;
+
 /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
 enum host2guc_action {
 	HOST2GUC_ACTION_DEFAULT = 0x0,
 	HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
 	HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
 	HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+	HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+	HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
 	HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
 	HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
 	HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+	HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
 	HOST2GUC_ACTION_LIMIT
 };
 
@@ -449,4 +521,10 @@ enum guc2host_status {
 	GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
 };
 
+/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
+enum guc2host_message {
+	GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
+	GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
+};
+
 #endif
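
The flush protocol described in the guc_log_buffer_state DOC comment implies a particular host-side consumer: copy out everything between read_ptr and sampled_write_ptr (handling wrap-around), then advance read_ptr and clear flush_to_file before acknowledging GuC. A hypothetical sketch of that consumer over a struct mirroring the fields above (the real i915 handler is added elsewhere in this series):

	#include <stdint.h>
	#include <string.h>

	struct log_buffer_state {
		uint32_t read_ptr;		/* host-owned */
		uint32_t write_ptr;		/* GuC-owned */
		uint32_t size;
		uint32_t sampled_write_ptr;	/* snapshot taken by GuC at flush */
		uint32_t flush_to_file;
	};

	/* Copy [read_ptr, sampled_write_ptr) out of the circular buffer,
	 * then complete the handshake so GuC may reuse the first half. */
	static size_t consume_log(struct log_buffer_state *st,
				  const uint8_t *buf, uint8_t *out)
	{
		uint32_t head = st->read_ptr;
		uint32_t tail = st->sampled_write_ptr;
		size_t copied;

		if (tail >= head) {
			memcpy(out, buf + head, tail - head);
			copied = tail - head;
		} else {
			/* Wrap-around: tail went past the buffer end. */
			memcpy(out, buf + head, st->size - head);
			memcpy(out + (st->size - head), buf, tail);
			copied = (st->size - head) + tail;
		}

		st->read_ptr = tail;	/* tell GuC how far we consumed */
		st->flush_to_file = 0;	/* clear the request before the ack */

		return copied;
	}

	int main(void)
	{
		uint8_t buf[16] = { 0 }, out[16];
		struct log_buffer_state st = {
			.read_ptr = 12, .size = 16, .sampled_write_ptr = 4,
		};

		return consume_log(&st, buf, out) == 8 ? 0 : 1;
	}
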
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 6fd39ef..1aa8523 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -100,12 +100,13 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int irqs;
 
 	/* tell all command streamers NOT to forward interrupts or vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
 	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
 	/* route all GT interrupts to the host */
@@ -117,12 +118,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int irqs;
 	u32 tmp;
 
 	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
 	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
 	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -209,11 +211,13 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
 	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
 			GUC_CTL_VCS2_ENABLED;
 
+	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
 	if (i915.guc_log_level >= 0) {
-		params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
 		params[GUC_CTL_DEBUG] =
 			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
-	}
+	} else {
+		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
+	}
 
 	if (guc->ads_vma) {
 		u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
@@ -347,7 +351,6 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
 static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-	struct drm_device *dev = &dev_priv->drm;
 	struct i915_vma *vma;
 	int ret;
 
@@ -375,24 +378,22 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 	/* Enable MIA caching. GuC clock gating is disabled. */
 	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
 
-	/* WaDisableMinuteIaClockGating:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	/* WaDisableMinuteIaClockGating:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
 					      ~GUC_ENABLE_MIA_CLOCK_GATING));
 	}
 
-	/* WaC6DisallowByGfxPause*/
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+	/* WaC6DisallowByGfxPause:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
 		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
 
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(dev_priv))
 		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
 	else
 		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
 
-	if (IS_GEN9(dev)) {
+	if (IS_GEN9(dev_priv)) {
 		/* DOP Clock Gating Enable for GuC clocks */
 		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
 					    I915_READ(GEN7_MISCCPCTL)));
@@ -484,6 +485,7 @@ int intel_guc_setup(struct drm_device *dev)
 	}
 
 	guc_interrupts_release(dev_priv);
+	gen9_reset_guc_interrupts(dev_priv);
 
 	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
 
@@ -528,6 +530,9 @@ int intel_guc_setup(struct drm_device *dev)
 		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
 
 	if (i915.enable_guc_submission) {
+		if (i915.guc_log_level >= 0)
+			gen9_enable_guc_interrupts(dev_priv);
+
 		err = i915_guc_submission_enable(dev_priv);
 		if (err)
 			goto fail;
@@ -720,23 +725,28 @@ void intel_guc_init(struct drm_device *dev)
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	const char *fw_path;
 
-	/* A negative value means "use platform default" */
-	if (i915.enable_guc_loading < 0)
-		i915.enable_guc_loading = HAS_GUC_UCODE(dev);
-	if (i915.enable_guc_submission < 0)
-		i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+	if (!HAS_GUC(dev)) {
+		i915.enable_guc_loading = 0;
+		i915.enable_guc_submission = 0;
+	} else {
+		/* A negative value means "use platform default" */
+		if (i915.enable_guc_loading < 0)
+			i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+		if (i915.enable_guc_submission < 0)
+			i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+	}
 
 	if (!HAS_GUC_UCODE(dev)) {
 		fw_path = NULL;
-	} else if (IS_SKYLAKE(dev)) {
+	} else if (IS_SKYLAKE(dev_priv)) {
 		fw_path = I915_SKL_GUC_UCODE;
 		guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
 		guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		fw_path = I915_BXT_GUC_UCODE;
 		guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
 		guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
-	} else if (IS_KABYLAKE(dev)) {
+	} else if (IS_KABYLAKE(dev_priv)) {
 		fw_path = I915_KBL_GUC_UCODE;
 		guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
 		guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 434f4d5..290384e 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -31,14 +31,20 @@
  * GPU among multiple virtual machines on a time-sharing basis. Each
  * virtual machine is presented a virtual GPU (vGPU), which has equivalent
  * features as the underlying physical GPU (pGPU), so i915 driver can run
- * seamlessly in a virtual machine. This file provides the englightments
- * of GVT and the necessary components used by GVT in i915 driver.
+ * seamlessly in a virtual machine.
+ *
+ * To virtualize GPU resources, the GVT-g driver depends on hypervisor
+ * technology (e.g. KVM/VFIO/mdev, Xen) to provide resource access trapping,
+ * which is virtualized within the GVT-g device module. More architectural
+ * design documentation is available at https://01.org/group/2230/documentation-list.
  */
 
 static bool is_supported_device(struct drm_i915_private *dev_priv)
 {
 	if (IS_BROADWELL(dev_priv))
 		return true;
+	if (IS_SKYLAKE(dev_priv))
+		return true;
 	return false;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 960211d..25df2d6 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,7 +24,7 @@
 #ifndef _INTEL_GVT_H_
 #define _INTEL_GVT_H_
 
-#include "gvt/gvt.h"
+struct intel_gvt;
 
 #ifdef CONFIG_DRM_I915_GVT
 int intel_gvt_init(struct drm_i915_private *dev_priv);
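
Replacing the gvt/gvt.h include with a forward declaration keeps the GVT-g private headers out of every file that includes intel_gvt.h; since callers only ever pass pointers, the type can stay opaque. A generic illustration of the technique (hypothetical widget example):

	/* widget.h -- public interface: the type name is enough for pointers */
	struct widget;				/* forward declaration, size unknown */
	struct widget *widget_create(void);
	void widget_destroy(struct widget *w);

	/* widget.c -- only the implementation needs the full definition */
	#include <stdlib.h>

	struct widget {
		int state;
	};

	struct widget *widget_create(void)
	{
		return calloc(1, sizeof(struct widget));
	}

	void widget_destroy(struct widget *w)
	{
		free(w);
	}

	int main(void)
	{
		struct widget *w = widget_create();
		widget_destroy(w);
		return 0;
	}
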
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
new file mode 100644
index 0000000..53df5b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static bool
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
+{
+	if (INTEL_GEN(engine->i915) >= 8) {
+		return (ipehr >> 23) == 0x1c;
+	} else {
+		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+				 MI_SEMAPHORE_REGISTER);
+	}
+}
+
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+				 u64 offset)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_engine_cs *signaller;
+	enum intel_engine_id id;
+
+	if (INTEL_GEN(dev_priv) >= 8) {
+		for_each_engine(signaller, dev_priv, id) {
+			if (engine == signaller)
+				continue;
+
+			if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
+				return signaller;
+		}
+	} else {
+		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+		for_each_engine(signaller, dev_priv, id) {
+			if (engine == signaller)
+				continue;
+
+			if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
+				return signaller;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+			 engine->name, ipehr, offset);
+
+	return ERR_PTR(-ENODEV);
+}
+
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	void __iomem *vaddr;
+	u32 cmd, ipehr, head;
+	u64 offset = 0;
+	int i, backwards;
+
+	/*
+	 * This function does not support execlist mode - any attempt to
+	 * proceed further into this function will result in a kernel panic
+	 * when dereferencing ring->buffer, which is not set up in execlist
+	 * mode.
+	 *
+	 * The correct way of doing it would be to derive the currently
+	 * executing ring buffer from the current context, which is derived
+	 * from the currently running request. Unfortunately, to get the
+	 * current request we would have to grab the struct_mutex before doing
+	 * anything else, which would be ill-advised since some other thread
+	 * might have grabbed it already and managed to hang itself, causing
+	 * the hang checker to deadlock.
+	 *
+	 * Therefore, this function does not support execlist mode in its
+	 * current form. Just return NULL and move on.
+	 */
+	if (engine->buffer == NULL)
+		return NULL;
+
+	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+	if (!ipehr_is_semaphore_wait(engine, ipehr))
+		return NULL;
+
+	/*
+	 * HEAD is likely pointing to the dword after the actual command,
+	 * so scan backwards until we find the MBOX. But limit it to just 3
+	 * or 4 dwords depending on the semaphore wait command size.
+	 * Note that we don't care about ACTHD here since that might
+	 * point at a batch, and semaphores are always emitted into the
+	 * ringbuffer itself.
+	 */
+	head = I915_READ_HEAD(engine) & HEAD_ADDR;
+	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
+	vaddr = (void __iomem *)engine->buffer->vaddr;
+
+	for (i = backwards; i; --i) {
+		/*
+		 * Be paranoid and presume the hw has gone off into the wild -
+		 * our ring is smaller than what the hardware (and hence
+		 * HEAD_ADDR) allows. Also handles wrap-around.
+		 */
+		head &= engine->buffer->size - 1;
+
+		/* This here seems to blow up */
+		cmd = ioread32(vaddr + head);
+		if (cmd == ipehr)
+			break;
+
+		head -= 4;
+	}
+
+	if (!i)
+		return NULL;
+
+	*seqno = ioread32(vaddr + head + 4) + 1;
+	if (INTEL_GEN(dev_priv) >= 8) {
+		offset = ioread32(vaddr + head + 12);
+		offset <<= 32;
+		offset |= ioread32(vaddr + head + 8);
+	}
+	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
+}
+
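The scan above works because the ring size is a power of two: "head &= size - 1" both clamps a runaway HEAD value into the ring and absorbs the unsigned underflow of "head -= 4" when the scan crosses offset 0. A minimal standalone sketch of that masking idiom, with a four-entry ring and contents invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical ring of 4 dwords (16 bytes); size is a power of two. */
	uint32_t ring[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint32_t size = sizeof(ring);
	uint32_t head = 4;	/* byte offset into the ring */
	int i;

	for (i = 4; i; --i) {
		head &= size - 1;	/* clamp and handle wrap-around */
		printf("offset %2u -> 0x%02x\n",
		       (unsigned)head, (unsigned)ring[head / 4]);
		head -= 4;	/* may underflow; the mask above fixes it up */
	}
	return 0;
}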
+static int semaphore_passed(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_engine_cs *signaller;
+	u32 seqno;
+
+	engine->hangcheck.deadlock++;
+
+	signaller = semaphore_waits_for(engine, &seqno);
+	if (signaller == NULL)
+		return -1;
+
+	if (IS_ERR(signaller))
+		return 0;
+
+	/* Prevent pathological recursion due to driver bugs */
+	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
+		return -1;
+
+	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+		return 1;
+
+	/* cursory check for an unkickable deadlock */
+	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+	    semaphore_passed(signaller) < 0)
+		return -1;
+
+	return 0;
+}
+
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, dev_priv, id)
+		engine->hangcheck.deadlock = 0;
+}
+
+static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
+{
+	u32 tmp = current_instdone | *old_instdone;
+	bool unchanged;
+
+	unchanged = tmp == *old_instdone;
+	*old_instdone |= tmp;
+
+	return unchanged;
+}
+
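Note that instdone_unchanged() reports progress only for bits that are new relative to the accumulator, so a subunit bit that oscillates counts as progress at most once until the accumulator is cleared on head movement. A short worked trace, assuming the accumulator starts at 0x1:

/*
 * With *old == 0x1:
 *   instdone_unchanged(0x1, &old) -> true   (no new bits; old stays 0x1)
 *   instdone_unchanged(0x3, &old) -> false  (bit 1 is new; old becomes 0x3)
 *   instdone_unchanged(0x1, &old) -> true   (bit 1 dropping is not progress)
 */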
+static bool subunits_stuck(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_instdone instdone;
+	struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
+	bool stuck;
+	int slice;
+	int subslice;
+
+	if (engine->id != RCS)
+		return true;
+
+	intel_engine_get_instdone(engine, &instdone);
+
+	/* There might be unstable subunit states even when
+	 * actual head is not moving. Filter out the unstable ones by
+	 * accumulating the undone -> done transitions and only
+	 * consider those as progress.
+	 */
+	stuck = instdone_unchanged(instdone.instdone,
+				   &accu_instdone->instdone);
+	stuck &= instdone_unchanged(instdone.slice_common,
+				    &accu_instdone->slice_common);
+
+	for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+		stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
+					    &accu_instdone->sampler[slice][subslice]);
+		stuck &= instdone_unchanged(instdone.row[slice][subslice],
+					    &accu_instdone->row[slice][subslice]);
+	}
+
+	return stuck;
+}
+
+static enum intel_engine_hangcheck_action
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+	if (acthd != engine->hangcheck.acthd) {
+
+		/* Clear subunit states on head movement */
+		memset(&engine->hangcheck.instdone, 0,
+		       sizeof(engine->hangcheck.instdone));
+
+		return HANGCHECK_ACTIVE;
+	}
+
+	if (!subunits_stuck(engine))
+		return HANGCHECK_ACTIVE;
+
+	return HANGCHECK_HUNG;
+}
+
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	enum intel_engine_hangcheck_action ha;
+	u32 tmp;
+
+	ha = head_stuck(engine, acthd);
+	if (ha != HANGCHECK_HUNG)
+		return ha;
+
+	if (IS_GEN2(dev_priv))
+		return HANGCHECK_HUNG;
+
+	/* Is the chip hanging on a WAIT_FOR_EVENT?
+	 * If so we can simply poke the RB_WAIT bit
+	 * and break the hang. This should work on
+	 * all but the second generation chipsets.
+	 */
+	tmp = I915_READ_CTL(engine);
+	if (tmp & RING_WAIT) {
+		i915_handle_error(dev_priv, 0,
+				  "Kicking stuck wait on %s",
+				  engine->name);
+		I915_WRITE_CTL(engine, tmp);
+		return HANGCHECK_KICK;
+	}
+
+	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+		switch (semaphore_passed(engine)) {
+		default:
+			return HANGCHECK_HUNG;
+		case 1:
+			i915_handle_error(dev_priv, 0,
+					  "Kicking stuck semaphore on %s",
+					  engine->name);
+			I915_WRITE_CTL(engine, tmp);
+			return HANGCHECK_KICK;
+		case 0:
+			return HANGCHECK_WAIT;
+		}
+	}
+
+	return HANGCHECK_HUNG;
+}
+
+/*
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. We keep track of seqno progress per ring,
+ * and if there is no progress the hangcheck score for that ring is
+ * increased. Further, acthd is inspected to see if the ring is stuck; if
+ * it is, we kick the ring. If we see no progress on three subsequent
+ * calls we assume the chip is wedged and try to fix it by resetting it.
+ */
+static void i915_hangcheck_elapsed(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     gpu_error.hangcheck_work.work);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned int hung = 0, stuck = 0;
+	int busy_count = 0;
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+#define ACTIVE_DECAY 15
+
+	if (!i915.enable_hangcheck)
+		return;
+
+	if (!READ_ONCE(dev_priv->gt.awake))
+		return;
+
+	/* As enabling the GPU requires fairly extensive mmio access,
+	 * periodically arm the mmio checker to see if we are triggering
+	 * any invalid access.
+	 */
+	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+	for_each_engine(engine, dev_priv, id) {
+		bool busy = intel_engine_has_waiter(engine);
+		u64 acthd;
+		u32 seqno;
+		u32 submit;
+
+		semaphore_clear_deadlocks(dev_priv);
+
+		/* We don't strictly need an irq-barrier here, as we are not
+		 * serving an interrupt request, but be paranoid in case the
+		 * barrier has side-effects (such as preventing a broken
+		 * cacheline snoop) and so be sure that we can see the seqno
+		 * advance. If the seqno should stick, due to a stale
+		 * cacheline, we would erroneously declare the GPU hung.
+		 */
+		if (engine->irq_seqno_barrier)
+			engine->irq_seqno_barrier(engine);
+
+		acthd = intel_engine_get_active_head(engine);
+		seqno = intel_engine_get_seqno(engine);
+		submit = intel_engine_last_submit(engine);
+
+		if (engine->hangcheck.seqno == seqno) {
+			if (i915_seqno_passed(seqno, submit)) {
+				engine->hangcheck.action = HANGCHECK_IDLE;
+			} else {
+				/* We always increment the hangcheck score
+				 * if the engine is busy and still processing
+				 * the same request, so that no single request
+				 * can run indefinitely (such as a chain of
+				 * batches). The only time we do not increment
+				 * the hangcheck score on this ring is when
+				 * this engine is in a legitimate wait for
+				 * another engine. In that case the waiting
+				 * engine is a victim and we want to be sure
+				 * we catch the right culprit. Each time we do
+				 * kick the ring we also add a small increment
+				 * to the score, so that we can catch a batch
+				 * that is being repeatedly kicked and is thus
+				 * responsible for stalling the machine.
+				 */
+				engine->hangcheck.action =
+					engine_stuck(engine, acthd);
+
+				switch (engine->hangcheck.action) {
+				case HANGCHECK_IDLE:
+				case HANGCHECK_WAIT:
+					break;
+				case HANGCHECK_ACTIVE:
+					engine->hangcheck.score += BUSY;
+					break;
+				case HANGCHECK_KICK:
+					engine->hangcheck.score += KICK;
+					break;
+				case HANGCHECK_HUNG:
+					engine->hangcheck.score += HUNG;
+					break;
+				}
+			}
+
+			if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+				hung |= intel_engine_flag(engine);
+				if (engine->hangcheck.action != HANGCHECK_HUNG)
+					stuck |= intel_engine_flag(engine);
+			}
+		} else {
+			engine->hangcheck.action = HANGCHECK_ACTIVE;
+
+			/* Gradually reduce the count so that we catch DoS
+			 * attempts across multiple batches.
+			 */
+			if (engine->hangcheck.score > 0)
+				engine->hangcheck.score -= ACTIVE_DECAY;
+			if (engine->hangcheck.score < 0)
+				engine->hangcheck.score = 0;
+
+			/* Clear head and subunit states on seqno movement */
+			acthd = 0;
+
+			memset(&engine->hangcheck.instdone, 0,
+			       sizeof(engine->hangcheck.instdone));
+		}
+
+		engine->hangcheck.seqno = seqno;
+		engine->hangcheck.acthd = acthd;
+		busy_count += busy;
+	}
+
+	if (hung) {
+		char msg[80];
+		unsigned int tmp;
+		int len;
+
+		/* If some rings hung but others were still busy, only
+		 * blame the hanging rings in the synopsis.
+		 */
+		if (stuck != hung)
+			hung &= ~stuck;
+		len = scnprintf(msg, sizeof(msg),
+				"%s on ", stuck == hung ? "No progress" : "Hang");
+		for_each_engine_masked(engine, dev_priv, hung, tmp)
+			len += scnprintf(msg + len, sizeof(msg) - len,
+					 "%s, ", engine->name);
+		msg[len-2] = '\0';
+
+		return i915_handle_error(dev_priv, hung, msg);
+	}
+
+	/* Reset timer in case GPU hangs without another request being added */
+	if (busy_count)
+		i915_queue_hangcheck(dev_priv);
+}
+
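The weights above trade responsiveness against false positives: a hard hang (HUNG = 20) crosses the firing threshold within two periods, repeated kicks (KICK = 5) take seven, while a merely busy engine (BUSY = 1) needs dozens of periods and sheds ACTIVE_DECAY (15) points as soon as its seqno moves. A worked trace, assuming a firing threshold of 31 for HANGCHECK_SCORE_RING_HUNG (the actual value lives in intel_ringbuffer.h, so treat 31 as an assumption):

/*
 * Period:  1    2    3    4    5    6    7
 * HUNG:    20   40                            -> hung in period 2 (40 >= 31)
 * KICK:    5    10   15   20   25   30   35   -> hung in period 7 (35 >= 31)
 * BUSY:    1    2    3 ...  then, on seqno movement: max(score - 15, 0)
 */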
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+{
+	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+void intel_hangcheck_init(struct drm_i915_private *i915)
+{
+	INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
+			  i915_hangcheck_elapsed);
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f40a35f..fb88e32 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t enabled_bits;
 
-	enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+	enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
 
 	WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
 	     "HDMI port enabled, expecting disabled\n");
@@ -864,7 +864,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
 
 	hdmi_val = SDVO_ENCODING_HDMI;
-	if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+	if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
 		hdmi_val |= HDMI_COLOR_RANGE_16_235;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -879,9 +879,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
 	if (crtc->config->has_hdmi_sink)
 		hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_CPT(dev_priv))
 		hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
-	else if (IS_CHERRYVIEW(dev))
+	else if (IS_CHERRYVIEW(dev_priv))
 		hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
 	else
 		hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
@@ -911,9 +911,9 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
 	if (!(tmp & SDVO_ENABLE))
 		goto out;
 
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_CPT(dev_priv))
 		*pipe = PORT_TO_PIPE_CPT(tmp);
-	else if (IS_CHERRYVIEW(dev))
+	else if (IS_CHERRYVIEW(dev_priv))
 		*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
 	else
 		*pipe = PORT_TO_PIPE(tmp);
@@ -956,7 +956,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	if (tmp & SDVO_AUDIO_ENABLE)
 		pipe_config->has_audio = true;
 
-	if (!HAS_PCH_SPLIT(dev) &&
+	if (!HAS_PCH_SPLIT(dev_priv) &&
 	    tmp & HDMI_COLOR_RANGE_16_235)
 		pipe_config->limited_color_range = true;
 
@@ -975,14 +975,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	pipe_config->lane_count = 4;
 }
 
-static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
 	WARN_ON(!crtc->config->has_hdmi_sink);
 	DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
 			 pipe_name(crtc->pipe));
-	intel_audio_codec_enable(encoder);
+	intel_audio_codec_enable(encoder, pipe_config, conn_state);
 }
 
 static void g4x_enable_hdmi(struct intel_encoder *encoder,
@@ -991,21 +993,20 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
 
 	temp |= SDVO_ENABLE;
-	if (crtc->config->has_audio)
+	if (pipe_config->has_audio)
 		temp |= SDVO_AUDIO_ENABLE;
 
 	I915_WRITE(intel_hdmi->hdmi_reg, temp);
 	POSTING_READ(intel_hdmi->hdmi_reg);
 
-	if (crtc->config->has_audio)
-		intel_enable_hdmi_audio(encoder);
+	if (pipe_config->has_audio)
+		intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
 }
 
 static void ibx_enable_hdmi(struct intel_encoder *encoder,
@@ -1040,8 +1041,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
 	 * FIXME: BSpec says this should be done at the end of
 	 * of the modeset sequence, so not sure if this isn't too soon.
 	 */
-	if (crtc->config->pipe_bpp > 24 &&
-	    crtc->config->pixel_multiplier > 1) {
+	if (pipe_config->pipe_bpp > 24 &&
+	    pipe_config->pixel_multiplier > 1) {
 		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 
@@ -1055,8 +1056,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
 		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
 
-	if (crtc->config->has_audio)
-		intel_enable_hdmi_audio(encoder);
+	if (pipe_config->has_audio)
+		intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
 }
 
 static void cpt_enable_hdmi(struct intel_encoder *encoder,
@@ -1073,7 +1074,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 	temp = I915_READ(intel_hdmi->hdmi_reg);
 
 	temp |= SDVO_ENABLE;
-	if (crtc->config->has_audio)
+	if (pipe_config->has_audio)
 		temp |= SDVO_AUDIO_ENABLE;
 
 	/*
@@ -1086,7 +1087,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 	 * 4. enable HDMI clock gating
 	 */
 
-	if (crtc->config->pipe_bpp > 24) {
+	if (pipe_config->pipe_bpp > 24) {
 		I915_WRITE(TRANS_CHICKEN1(pipe),
 			   I915_READ(TRANS_CHICKEN1(pipe)) |
 			   TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
@@ -1098,7 +1099,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 	I915_WRITE(intel_hdmi->hdmi_reg, temp);
 	POSTING_READ(intel_hdmi->hdmi_reg);
 
-	if (crtc->config->pipe_bpp > 24) {
+	if (pipe_config->pipe_bpp > 24) {
 		temp &= ~SDVO_COLOR_FORMAT_MASK;
 		temp |= HDMI_COLOR_FORMAT_12bpc;
 
@@ -1110,8 +1111,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 			   ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
 	}
 
-	if (crtc->config->has_audio)
-		intel_enable_hdmi_audio(encoder);
+	if (pipe_config->has_audio)
+		intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
 }
 
 static void vlv_enable_hdmi(struct intel_encoder *encoder,
@@ -1141,7 +1142,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
 	 * to transcoder A after disabling it to allow the
 	 * matching DP port to be enabled on transcoder A.
 	 */
-	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
+	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
 		/*
 		 * We get CPU/PCH FIFO underruns on the other pipe when
 		 * doing the workaround. Sweep them under the rug.
@@ -1164,7 +1165,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 
-		intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
@@ -1178,9 +1179,7 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
 			     struct intel_crtc_state *old_crtc_state,
 			     struct drm_connector_state *old_conn_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
-	if (crtc->config->has_audio)
+	if (old_crtc_state->has_audio)
 		intel_audio_codec_disable(encoder);
 
 	intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
@@ -1190,9 +1189,7 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
 			     struct intel_crtc_state *old_crtc_state,
 			     struct drm_connector_state *old_conn_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
-	if (crtc->config->has_audio)
+	if (old_crtc_state->has_audio)
 		intel_audio_codec_disable(encoder);
 }
 
@@ -1241,7 +1238,7 @@ static enum drm_mode_status
 hdmi_port_clock_valid(struct intel_hdmi *hdmi,
 		      int clock, bool respect_downstream_limits)
 {
-	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+	struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
 
 	if (clock < 25000)
 		return MODE_CLOCK_LOW;
@@ -1249,11 +1246,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
 		return MODE_CLOCK_HIGH;
 
 	/* BXT DPLL can't generate 223-240 MHz */
-	if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+	if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
 		return MODE_CLOCK_RANGE;
 
 	/* CHV DPLL can't generate 216-240 MHz */
-	if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+	if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
 		return MODE_CLOCK_RANGE;
 
 	return MODE_OK;
@@ -1265,6 +1262,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 {
 	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum drm_mode_status status;
 	int clock;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1287,7 +1285,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 	status = hdmi_port_clock_valid(hdmi, clock, true);
 
 	/* if we can't do 8bpc we may still be able to do 12bpc */
-	if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+	if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK)
 		status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
 
 	return status;
@@ -1297,7 +1295,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc_state->base.crtc->dev;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(to_i915(dev)))
 		return false;
 
 	/*
@@ -1312,7 +1310,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 			       struct drm_connector_state *conn_state)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
 	int clock_12bpc = clock_8bpc * 3 / 2;
@@ -1339,7 +1337,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 		clock_12bpc *= 2;
 	}
 
-	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
 		pipe_config->has_pch_encoder = true;
 
 	if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
@@ -1644,13 +1642,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
 				  struct drm_connector_state *conn_state)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
 	intel_hdmi_prepare(encoder);
 
 	intel_hdmi->set_infoframes(&encoder->base,
-				   intel_crtc->config->has_hdmi_sink,
+				   pipe_config->has_hdmi_sink,
 				   adjusted_mode);
 }
 
@@ -1662,9 +1659,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
 	struct intel_hdmi *intel_hdmi = &dport->hdmi;
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc =
-		to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
 	vlv_phy_pre_encoder_enable(encoder);
 
@@ -1673,7 +1668,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
 				 0x2b247878);
 
 	intel_hdmi->set_infoframes(&encoder->base,
-				   intel_crtc->config->has_hdmi_sink,
+				   pipe_config->has_hdmi_sink,
 				   adjusted_mode);
 
 	g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1799,6 +1794,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
 	intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 }
 
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+			     enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	u8 ddc_pin;
+
+	if (info->alternate_ddc_pin) {
+		DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+			      info->alternate_ddc_pin, port_name(port));
+		return info->alternate_ddc_pin;
+	}
+
+	switch (port) {
+	case PORT_B:
+		if (IS_BROXTON(dev_priv))
+			ddc_pin = GMBUS_PIN_1_BXT;
+		else
+			ddc_pin = GMBUS_PIN_DPB;
+		break;
+	case PORT_C:
+		if (IS_BROXTON(dev_priv))
+			ddc_pin = GMBUS_PIN_2_BXT;
+		else
+			ddc_pin = GMBUS_PIN_DPC;
+		break;
+	case PORT_D:
+		if (IS_CHERRYVIEW(dev_priv))
+			ddc_pin = GMBUS_PIN_DPD_CHV;
+		else
+			ddc_pin = GMBUS_PIN_DPD;
+		break;
+	default:
+		MISSING_CASE(port);
+		ddc_pin = GMBUS_PIN_DPB;
+		break;
+	}
+
+	DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+		      ddc_pin, port_name(port));
+
+	return ddc_pin;
+}
+
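The helper gives VBT routing precedence over the per-port defaults, which also keeps the PORT_E special case (no dedicated GMBUS pin, so the VBT must supply one) out of the switch entirely. A hedged sketch of the precedence, with the override pin value made up:

/* A VBT that routes port B's DDC to an alternate pin wins outright: */
dev_priv->vbt.ddi_port_info[PORT_B].alternate_ddc_pin = 0x5;
intel_hdmi_ddc_pin(dev_priv, PORT_B);	/* -> 0x5 (VBT override) */

/* With no override, the platform default is used instead: */
dev_priv->vbt.ddi_port_info[PORT_C].alternate_ddc_pin = 0;
intel_hdmi_ddc_pin(dev_priv, PORT_C);	/* -> GMBUS_PIN_DPC, or
					 *    GMBUS_PIN_2_BXT on Broxton */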
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			       struct intel_connector *intel_connector)
 {
@@ -1808,7 +1847,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
-	uint8_t alternate_ddc_pin;
 
 	DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
 		      port_name(port));
@@ -1826,12 +1864,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	connector->doublescan_allowed = 0;
 	connector->stereo_allowed = 1;
 
+	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+
 	switch (port) {
 	case PORT_B:
-		if (IS_BROXTON(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
 		/*
 		 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
 		 * interrupts to check the external panel connection.
@@ -1842,61 +1878,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			intel_encoder->hpd_pin = HPD_PORT_B;
 		break;
 	case PORT_C:
-		if (IS_BROXTON(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
 		intel_encoder->hpd_pin = HPD_PORT_C;
 		break;
 	case PORT_D:
-		if (WARN_ON(IS_BROXTON(dev_priv)))
-			intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
-		else if (IS_CHERRYVIEW(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
 	case PORT_E:
-		/* On SKL PORT E doesn't have seperate GMBUS pin
-		 *  We rely on VBT to set a proper alternate GMBUS pin. */
-		alternate_ddc_pin =
-			dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
-		switch (alternate_ddc_pin) {
-		case DDC_PIN_B:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
-			break;
-		case DDC_PIN_C:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
-			break;
-		case DDC_PIN_D:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
-			break;
-		default:
-			MISSING_CASE(alternate_ddc_pin);
-		}
 		intel_encoder->hpd_pin = HPD_PORT_E;
 		break;
-	case PORT_A:
-		intel_encoder->hpd_pin = HPD_PORT_A;
-		/* Internal port only for eDP. */
 	default:
-		BUG();
+		MISSING_CASE(port);
+		return;
 	}
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		intel_hdmi->write_infoframe = vlv_write_infoframe;
 		intel_hdmi->set_infoframes = vlv_set_infoframes;
 		intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
-	} else if (IS_G4X(dev)) {
+	} else if (IS_G4X(dev_priv)) {
 		intel_hdmi->write_infoframe = g4x_write_infoframe;
 		intel_hdmi->set_infoframes = g4x_set_infoframes;
 		intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
-	} else if (HAS_DDI(dev)) {
+	} else if (HAS_DDI(dev_priv)) {
 		intel_hdmi->write_infoframe = hsw_write_infoframe;
 		intel_hdmi->set_infoframes = hsw_set_infoframes;
 		intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
-	} else if (HAS_PCH_IBX(dev)) {
+	} else if (HAS_PCH_IBX(dev_priv)) {
 		intel_hdmi->write_infoframe = ibx_write_infoframe;
 		intel_hdmi->set_infoframes = ibx_set_infoframes;
 		intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
@@ -1906,7 +1913,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 		intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
 	}
 
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
 	else
 		intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1920,7 +1927,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	 * 0xd.  Failure to do so will result in spurious interrupts being
 	 * generated on the port when a cable is not attached.
 	 */
-	if (IS_G4X(dev) && !IS_GM45(dev)) {
+	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
 	}
@@ -1929,6 +1936,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 void intel_hdmi_init(struct drm_device *dev,
 		     i915_reg_t hdmi_reg, enum port port)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_digital_port *intel_dig_port;
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
@@ -1949,7 +1957,7 @@ void intel_hdmi_init(struct drm_device *dev,
 			 DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
 
 	intel_encoder->compute_config = intel_hdmi_compute_config;
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		intel_encoder->disable = pch_disable_hdmi;
 		intel_encoder->post_disable = pch_post_disable_hdmi;
 	} else {
@@ -1957,29 +1965,30 @@ void intel_hdmi_init(struct drm_device *dev,
 	}
 	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
 	intel_encoder->get_config = intel_hdmi_get_config;
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
 		intel_encoder->pre_enable = chv_hdmi_pre_enable;
 		intel_encoder->enable = vlv_enable_hdmi;
 		intel_encoder->post_disable = chv_hdmi_post_disable;
 		intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
-	} else if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
 		intel_encoder->pre_enable = vlv_hdmi_pre_enable;
 		intel_encoder->enable = vlv_enable_hdmi;
 		intel_encoder->post_disable = vlv_hdmi_post_disable;
 	} else {
 		intel_encoder->pre_enable = intel_hdmi_pre_enable;
-		if (HAS_PCH_CPT(dev))
+		if (HAS_PCH_CPT(dev_priv))
 			intel_encoder->enable = cpt_enable_hdmi;
-		else if (HAS_PCH_IBX(dev))
+		else if (HAS_PCH_IBX(dev_priv))
 			intel_encoder->enable = ibx_enable_hdmi;
 		else
 			intel_encoder->enable = g4x_enable_hdmi;
 	}
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
-	if (IS_CHERRYVIEW(dev)) {
+	intel_encoder->port = port;
+	if (IS_CHERRYVIEW(dev_priv)) {
 		if (port == PORT_D)
 			intel_encoder->crtc_mask = 1 << 2;
 		else
@@ -1993,7 +2002,7 @@ void intel_hdmi_init(struct drm_device *dev,
 	 * to work on real hardware. And since g4x can send infoframes to
 	 * only one port anyway, nothing is lost by allowing it.
 	 */
-	if (IS_G4X(dev))
+	if (IS_G4X(dev_priv))
 		intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
 
 	intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 79aab9a..83f260b 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -138,11 +138,10 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
 static u32 get_reserved(struct intel_gmbus *bus)
 {
 	struct drm_i915_private *dev_priv = bus->dev_priv;
-	struct drm_device *dev = &dev_priv->drm;
 	u32 reserved = 0;
 
 	/* On most chips, these bits must be preserved in software. */
-	if (!IS_I830(dev) && !IS_845G(dev))
+	if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
 		reserved = I915_READ_NOTRACE(bus->gpio_reg) &
 					     (GPIO_DATA_PULLUP_DISABLE |
 					      GPIO_CLOCK_PULLUP_DISABLE);
@@ -468,13 +467,9 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 					       struct intel_gmbus,
 					       adapter);
 	struct drm_i915_private *dev_priv = bus->dev_priv;
-	const unsigned int fw =
-		intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
-					       FW_REG_READ | FW_REG_WRITE);
 	int i = 0, inc, try = 0;
 	int ret = 0;
 
-	intel_uncore_forcewake_get(dev_priv, fw);
 retry:
 	I915_WRITE_FW(GMBUS0, bus->reg0);
 
@@ -576,7 +571,6 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 	ret = -EAGAIN;
 
 out:
-	intel_uncore_forcewake_put(dev_priv, fw);
 	return ret;
 }
 
@@ -633,10 +627,10 @@ int intel_setup_gmbus(struct drm_device *dev)
 	unsigned int pin;
 	int ret;
 
-	if (HAS_PCH_NOP(dev))
+	if (HAS_PCH_NOP(dev_priv))
 		return 0;
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
 	else if (!HAS_GMCH_DISPLAY(dev_priv))
 		dev_priv->gpio_mmio_base =
@@ -674,7 +668,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 		bus->reg0 = pin | GMBUS_RATE_100KHZ;
 
 		/* gmbus seems to be broken on i830 */
-		if (IS_I830(dev))
+		if (IS_I830(dev_priv))
 			bus->force_bit = 1;
 
 		intel_gpio_setup(bus, pin);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0adb879..dde04b764 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -275,8 +275,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 
 	engine->disable_lite_restore_wa =
-		(IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
-		 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+		IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) &&
 		(engine->id == VCS || engine->id == VCS2);
 
 	engine->ctx_desc_template = GEN8_CTX_VALID;
@@ -366,7 +365,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
 
-	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
+	reg_state[CTX_RING_TAIL+1] = rq->tail;
 
 	/* True 32b PPGTT with dynamic page allocation: update PDP
 	 * registers and point the unallocated PDPs to scratch page.
@@ -441,7 +440,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	if (last)
 		/* WaIdleLiteRestore:bdw,skl
 		 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
-		 * as we resubmit the request. See gen8_emit_request()
+		 * as we resubmit the request. See gen8_emit_breadcrumb()
 		 * for where we prepare the padding after the end of the
 		 * request.
 		 */
@@ -523,6 +522,28 @@ static bool execlists_elsp_idle(struct intel_engine_cs *engine)
 	return !engine->execlist_port[0].request;
 }
 
+/**
+ * intel_execlists_idle() - Determine if all engine submission ports are idle
+ * @dev_priv: i915 device private
+ *
+ * Return true if there are no requests pending on any of the submission ports
+ * of any engines.
+ */
+bool intel_execlists_idle(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	if (!i915.enable_execlists)
+		return true;
+
+	for_each_engine(engine, dev_priv, id)
+		if (!execlists_elsp_idle(engine))
+			return false;
+
+	return true;
+}
+
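A hypothetical caller, for instance on an idling or suspend path, would use the helper as a final check that both ELSP ports of every engine have drained (the surrounding function here is invented for illustration):

static int example_wait_for_idle(struct drm_i915_private *dev_priv)
{
	/* True when every engine's ports are empty, or execlists are off. */
	if (intel_execlists_idle(dev_priv))
		return 0;

	return -EBUSY;	/* requests still in flight on some engine */
}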
 static bool execlists_elsp_ready(struct intel_engine_cs *engine)
 {
 	int port;
@@ -600,6 +621,15 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 
 	spin_lock_irqsave(&engine->execlist_lock, flags);
 
+	/* We keep the previous context alive until we retire the following
+	 * request. This ensures that the context object is still pinned
+	 * for any residual writes the HW makes into it on the context switch
+	 * into the next object following the breadcrumb. Otherwise, we may
+	 * retire the context too early.
+	 */
+	request->previous_context = engine->last_context;
+	engine->last_context = request->ctx;
+
 	list_add_tail(&request->execlist_link, &engine->execlist_queue);
 	if (execlists_elsp_idle(engine))
 		tasklet_hi_schedule(&engine->irq_tasklet);
@@ -672,46 +702,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	return ret;
 }
 
-/*
- * intel_logical_ring_advance() - advance the tail and prepare for submission
- * @request: Request to advance the logical ringbuffer of.
- *
- * The tail is updated in our logical ringbuffer struct, not in the actual context. What
- * really happens during submission is that the context and current tail will be placed
- * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
- * point, the tail *inside* the context is updated and the ELSP written to.
- */
-static int
-intel_logical_ring_advance(struct drm_i915_gem_request *request)
-{
-	struct intel_ring *ring = request->ring;
-	struct intel_engine_cs *engine = request->engine;
-
-	intel_ring_advance(ring);
-	request->tail = ring->tail;
-
-	/*
-	 * Here we add two extra NOOPs as padding to avoid
-	 * lite restore of a context with HEAD==TAIL.
-	 *
-	 * Caller must reserve WA_TAIL_DWORDS for us!
-	 */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
-	request->wa_tail = ring->tail;
-
-	/* We keep the previous context alive until we retire the following
-	 * request. This ensures that any the context object is still pinned
-	 * for any residual writes the HW makes into it on the context switch
-	 * into the next object following the breadcrumb. Otherwise, we may
-	 * retire the context too early.
-	 */
-	request->previous_context = engine->last_context;
-	engine->last_context = request->ctx;
-	return 0;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
@@ -745,7 +735,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
 		i915_ggtt_offset(ce->ring->vma);
 
-	ce->state->obj->dirty = true;
+	ce->state->obj->mm.dirty = true;
 
 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission) {
@@ -853,13 +843,12 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
 	/*
-	 * WaDisableLSQCROPERFforOCL:skl,kbl
+	 * WaDisableLSQCROPERFforOCL:kbl
 	 * This WA is implemented in skl_init_clock_gating() but since
 	 * this batch updates GEN8_L3SQCREG4 with default value we need to
 	 * set this bit here to retain the WA during flush.
 	 */
-	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
-	    IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1002,9 +991,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 	struct drm_i915_private *dev_priv = engine->i915;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
-	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+	/* WaDisableCtxRestoreArbitration:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1075,9 +1063,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 {
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
-	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
 		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
 		wa_ctx_emit(batch, index,
@@ -1104,9 +1091,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 		wa_ctx_emit(batch, index, MI_NOOP);
 	}
 
-	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
+	/* WaDisableCtxRestoreArbitration:bxt */
+	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
 	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1250,8 +1236,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 	intel_engine_init_hangcheck(engine);
 
-	if (!execlists_elsp_idle(engine))
+	/* After a GPU reset, we may have requests to replay */
+	if (!execlists_elsp_idle(engine)) {
+		engine->execlist_port[0].count = 0;
+		engine->execlist_port[1].count = 0;
 		execlists_submit_ports(engine);
+	}
 
 	return 0;
 }
@@ -1326,10 +1316,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 		memset(&port[1], 0, sizeof(port[1]));
 	}
 
-	/* CS is stopped, and we will resubmit both ports on resume */
 	GEM_BUG_ON(request->ctx != port[0].request->ctx);
-	port[0].count = 0;
-	port[1].count = 0;
 
 	/* Reset WaIdleLiteRestore:bdw,skl as well */
 	request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
@@ -1570,39 +1557,35 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-
-static int gen8_emit_request(struct drm_i915_gem_request *request)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
 {
-	struct intel_ring *ring = request->ring;
-	int ret;
+	*out++ = MI_NOOP;
+	*out++ = MI_NOOP;
+	request->wa_tail = intel_ring_offset(request->ring, out);
+}
 
-	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
-	if (ret)
-		return ret;
-
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
+				 u32 *out)
+{
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-	intel_ring_emit(ring,
-			intel_hws_seqno_address(request->engine) |
-			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, request->fence.seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_emit(ring, MI_NOOP);
-	return intel_logical_ring_advance(request);
+	*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+	*out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+	*out++ = 0;
+	*out++ = request->global_seqno;
+	*out++ = MI_USER_INTERRUPT;
+	*out++ = MI_NOOP;
+	request->tail = intel_ring_offset(request->ring, out);
+
+	gen8_emit_wa_tail(request, out);
 }
 
-static int gen8_emit_request_render(struct drm_i915_gem_request *request)
+static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
+
+static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
+					u32 *out)
 {
-	struct intel_ring *ring = request->ring;
-	int ret;
-
-	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
-	if (ret)
-		return ret;
-
 	/* We're using qword write, seqno should be aligned to 8 bytes. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
 
@@ -1610,21 +1593,24 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring,
-			(PIPE_CONTROL_GLOBAL_GTT_IVB |
-			 PIPE_CONTROL_CS_STALL |
-			 PIPE_CONTROL_QW_WRITE));
-	intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+		  PIPE_CONTROL_CS_STALL |
+		  PIPE_CONTROL_QW_WRITE);
+	*out++ = intel_hws_seqno_address(request->engine);
+	*out++ = 0;
+	*out++ = request->global_seqno;
 	/* We're thrashing one dword of HWS. */
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_emit(ring, MI_NOOP);
-	return intel_logical_ring_advance(request);
+	*out++ = 0;
+	*out++ = MI_USER_INTERRUPT;
+	*out++ = MI_NOOP;
+	request->tail = intel_ring_offset(request->ring, out);
+
+	gen8_emit_wa_tail(request, out);
 }
 
+static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
+
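Because the breadcrumb emitters now write through a raw dword pointer instead of calling intel_ring_begin()/intel_ring_emit(), the caller must reserve the space up front, and the _sz constants have to count every dword the callback writes, padding included. A sketch of that contract; reserve_dwords() is illustrative, not the driver's actual call site:

/* Illustrative only: the real reservation happens when the request is
 * constructed. */
u32 *out = reserve_dwords(request, request->engine->emit_breadcrumb_sz);

/* The callback must consume exactly emit_breadcrumb_sz dwords:
 * gen8_emit_breadcrumb() writes 6 dwords plus WA_TAIL_DWORDS of MI_NOOP
 * padding, matching gen8_emit_breadcrumb_sz above. */
request->engine->emit_breadcrumb(request, out);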
 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 {
 	int ret;
@@ -1641,7 +1627,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 	if (ret)
 		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-	return i915_gem_render_state_init(req);
+	return i915_gem_render_state_emit(req);
 }
 
 /**
@@ -1652,9 +1638,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
 
-	if (!intel_engine_initialized(engine))
-		return;
-
 	/*
 	 * Tasklet cannot be active at this point due intel_mark_active/idle
 	 * so this is just for documentation.
@@ -1681,13 +1664,16 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 
 	lrc_destroy_wa_ctx_obj(engine);
 	engine->i915 = NULL;
+	dev_priv->engine[engine->id] = NULL;
+	kfree(engine);
 }
 
 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		engine->submit_request = execlists_submit_request;
 }
 
@@ -1698,7 +1684,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->init_hw = gen8_init_common_ring;
 	engine->reset_hw = reset_common_ring;
 	engine->emit_flush = gen8_emit_flush;
-	engine->emit_request = gen8_emit_request;
+	engine->emit_breadcrumb = gen8_emit_breadcrumb;
+	engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
 	engine->submit_request = execlists_submit_request;
 
 	engine->irq_enable = gen8_logical_ring_enable_irq;
@@ -1820,7 +1807,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 		engine->init_hw = gen8_init_render_ring;
 	engine->init_context = gen8_init_rcs_context;
 	engine->emit_flush = gen8_emit_flush_render;
-	engine->emit_request = gen8_emit_request_render;
+	engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
+	engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
 
 	ret = intel_engine_create_scratch(engine, 4096);
 	if (ret)
@@ -1945,7 +1933,7 @@ static void execlists_init_reg_state(u32 *reg_state,
 		       RING_START(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
 		       RING_CTL(engine->mmio_base),
-		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+		       RING_CTL_SIZE(ring->size) | RING_VALID);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
 		       RING_BBADDR_UDW(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2046,7 +2034,7 @@ populate_lr_context(struct i915_gem_context *ctx,
 		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
 		return ret;
 	}
-	ctx_obj->dirty = true;
+	ctx_obj->mm.dirty = true;
 
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
@@ -2153,30 +2141,43 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 {
-	struct i915_gem_context *ctx = dev_priv->kernel_context;
 	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv) {
-		struct intel_context *ce = &ctx->engine[engine->id];
-		void *vaddr;
-		uint32_t *reg_state;
+	/* Because we emit WA_TAIL_DWORDS, there may be a disparity
+	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
+	 * the values stored in the context image. As we only write new
+	 * commands from ce->ring->tail onwards, everything before that is
+	 * junk. If the GPU starts reading from the RING_HEAD stored in the
+	 * context, it may try to
+	 * execute that junk and die.
+	 *
+	 * So to avoid that we reset the context images upon resume. For
+	 * simplicity, we just zero everything out.
+	 */
+	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		for_each_engine(engine, dev_priv, id) {
+			struct intel_context *ce = &ctx->engine[engine->id];
+			u32 *reg;
 
-		if (!ce->state)
-			continue;
+			if (!ce->state)
+				continue;
 
-		vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
-		if (WARN_ON(IS_ERR(vaddr)))
-			continue;
+			reg = i915_gem_object_pin_map(ce->state->obj,
+						      I915_MAP_WB);
+			if (WARN_ON(IS_ERR(reg)))
+				continue;
 
-		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+			reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
+			reg[CTX_RING_HEAD+1] = 0;
+			reg[CTX_RING_TAIL+1] = 0;
 
-		reg_state[CTX_RING_HEAD+1] = 0;
-		reg_state[CTX_RING_TAIL+1] = 0;
+			ce->state->obj->mm.dirty = true;
+			i915_gem_object_unpin_map(ce->state->obj);
 
-		ce->state->obj->dirty = true;
-		i915_gem_object_unpin_map(ce->state->obj);
-
-		ce->ring->head = 0;
-		ce->ring->tail = 0;
+			ce->ring->head = ce->ring->tail = 0;
+			ce->ring->last_retired_head = -1;
+			intel_ring_update_space(ce->ring);
+		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4fed816..c1f5461 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -95,5 +95,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+bool intel_execlists_idle(struct drm_i915_private *dev_priv);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
new file mode 100644
index 0000000..daa5234
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <drm/drm_edid.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_dual_mode_helper.h>
+#include "intel_drv.h"
+
+static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
+{
+	struct intel_digital_port *dig_port =
+		container_of(lspcon, struct intel_digital_port, lspcon);
+
+	return &dig_port->dp;
+}
+
+static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
+{
+	enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
+	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+	if (drm_lspcon_get_mode(adapter, &current_mode))
+		DRM_ERROR("Error reading LSPCON mode\n");
+	else
+		DRM_DEBUG_KMS("Current LSPCON mode %s\n",
+			current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
+	return current_mode;
+}
+
+static int lspcon_change_mode(struct intel_lspcon *lspcon,
+	enum drm_lspcon_mode mode, bool force)
+{
+	int err;
+	enum drm_lspcon_mode current_mode;
+	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+	err = drm_lspcon_get_mode(adapter, &current_mode);
+	if (err) {
+		DRM_ERROR("Error reading LSPCON mode\n");
+		return err;
+	}
+
+	if (current_mode == mode) {
+		DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n");
+		return 0;
+	}
+
+	err = drm_lspcon_set_mode(adapter, mode);
+	if (err < 0) {
+		DRM_ERROR("LSPCON mode change failed\n");
+		return err;
+	}
+
+	lspcon->mode = mode;
+	DRM_DEBUG_KMS("LSPCON mode change done\n");
+	return 0;
+}
+
+static bool lspcon_probe(struct intel_lspcon *lspcon)
+{
+	enum drm_dp_dual_mode_type adaptor_type;
+	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+
+	/* Let's probe the adaptor and check its type */
+	adaptor_type = drm_dp_dual_mode_detect(adapter);
+	if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
+		DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
+			drm_dp_get_dual_mode_type_name(adaptor_type));
+		return false;
+	}
+
+	/* Yay ... got an LSPCON device */
+	DRM_DEBUG_KMS("LSPCON detected\n");
+	lspcon->mode = lspcon_get_current_mode(lspcon);
+	lspcon->active = true;
+	return true;
+}
+
+static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
+{
+	struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+	unsigned long start = jiffies;
+
+	if (!lspcon->desc_valid)
+		return;
+
+	while (1) {
+		struct intel_dp_desc desc;
+
+		/*
+		 * The w/a only applies in PCON mode and we don't expect any
+		 * AUX errors.
+		 */
+		if (!__intel_dp_read_desc(intel_dp, &desc))
+			return;
+
+		if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+			DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
+				      jiffies_to_msecs(jiffies - start));
+			return;
+		}
+
+		if (time_after(jiffies, start + msecs_to_jiffies(1000)))
+			break;
+
+		usleep_range(10000, 15000);
+	}
+
+	DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
+}
+
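The workaround above is the standard jiffies-based poll loop: snapshot a deadline, compare each fresh sample against the pre-suspend descriptor, and give up after a second. Reduced to its skeleton, with the predicate as a placeholder:

unsigned long timeout = jiffies + msecs_to_jiffies(1000);

while (1) {
	if (sample_matches_snapshot())	/* placeholder predicate */
		return;			/* device has recovered */

	if (time_after(jiffies, timeout))
		break;			/* give up after ~1 second */

	usleep_range(10000, 15000);	/* back off 10-15 ms per attempt */
}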
+void lspcon_resume(struct intel_lspcon *lspcon)
+{
+	lspcon_resume_in_pcon_wa(lspcon);
+
+	if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
+		DRM_ERROR("LSPCON resume failed\n");
+	else
+		DRM_DEBUG_KMS("LSPCON resume success\n");
+}
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_dp *dp = &intel_dig_port->dp;
+	struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	if (!IS_GEN9(dev_priv)) {
+		DRM_ERROR("LSPCON is supported on GEN9 only\n");
+		return false;
+	}
+
+	lspcon->active = false;
+	lspcon->mode = DRM_LSPCON_MODE_INVALID;
+
+	if (!lspcon_probe(lspcon)) {
+		DRM_ERROR("Failed to probe lspcon\n");
+		return false;
+	}
+
+	/*
+	 * In the SW state machine, let's put LSPCON in PCON mode only.
+	 * This way it will work with both HDMI 1.4 and HDMI 2.0 sinks.
+	 */
+	if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
+		if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
+			true) < 0) {
+			DRM_ERROR("LSPCON mode change to PCON failed\n");
+			return false;
+		}
+	}
+
+	if (!intel_dp_read_dpcd(dp)) {
+		DRM_ERROR("LSPCON DPCD read failed\n");
+		return false;
+	}
+
+	lspcon->desc_valid = intel_dp_read_desc(dp);
+
+	DRM_DEBUG_KMS("Success: LSPCON init\n");
+	return true;
+}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d5..de7b3e6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 	if (!(tmp & LVDS_PORT_EN))
 		goto out;
 
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_CPT(dev_priv))
 		*pipe = PORT_TO_PIPE_CPT(tmp);
 	else
 		*pipe = PORT_TO_PIPE(tmp);
@@ -396,7 +396,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 				      struct intel_crtc_state *pipe_config,
 				      struct drm_connector_state *conn_state)
 {
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
 	struct intel_lvds_encoder *lvds_encoder =
 		to_lvds_encoder(&intel_encoder->base);
 	struct intel_connector *intel_connector =
@@ -406,7 +406,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 	unsigned int lvds_bpp;
 
 	/* Should never happen!! */
-	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+	if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
 		DRM_ERROR("Can't support LVDS on pipe A\n");
 		return false;
 	}
@@ -431,7 +431,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 	intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
 			       adjusted_mode);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		pipe_config->has_pch_encoder = true;
 
 		intel_pch_panel_fitting(intel_crtc, pipe_config,
@@ -566,7 +566,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	 * and as part of the cleanup in the hw state restore we also redisable
 	 * the vga plane.
 	 */
-	if (!HAS_PCH_SPLIT(dev))
+	if (!HAS_PCH_SPLIT(dev_priv))
 		intel_display_resume(dev);
 
 	dev_priv->modeset_restore = MODESET_DONE;
@@ -949,16 +949,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
 }
 
-static bool intel_lvds_supported(struct drm_device *dev)
+static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
 {
 	/* With the introduction of the PCH we gained a dedicated
 	 * LVDS presence pin, use it. */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
 		return true;
 
 	/* Otherwise LVDS was only attached to mobile products,
 	 * except for the inglorious 830gm */
-	if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+	if (INTEL_GEN(dev_priv) <= 4 &&
+	    IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
 		return true;
 
 	return false;
@@ -984,27 +985,27 @@ void intel_lvds_init(struct drm_device *dev)
 	struct drm_display_mode *fixed_mode = NULL;
 	struct drm_display_mode *downclock_mode = NULL;
 	struct edid *edid;
-	struct drm_crtc *crtc;
+	struct intel_crtc *crtc;
 	i915_reg_t lvds_reg;
 	u32 lvds;
 	int pipe;
 	u8 pin;
 
-	if (!intel_lvds_supported(dev))
+	if (!intel_lvds_supported(dev_priv))
 		return;
 
 	/* Skip init on machines we know falsely report LVDS */
 	if (dmi_check_system(intel_no_lvds))
 		return;
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		lvds_reg = PCH_LVDS;
 	else
 		lvds_reg = LVDS;
 
 	lvds = I915_READ(lvds_reg);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		if ((lvds & LVDS_DETECTED) == 0)
 			return;
 		if (dev_priv->vbt.edp.support) {
@@ -1064,12 +1065,13 @@ void intel_lvds_init(struct drm_device *dev)
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
-	intel_encoder->type = INTEL_OUTPUT_LVDS;
 
+	intel_encoder->type = INTEL_OUTPUT_LVDS;
+	intel_encoder->port = PORT_NONE;
 	intel_encoder->cloneable = 0;
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-	else if (IS_GEN4(dev))
+	else if (IS_GEN4(dev_priv))
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	else
 		intel_encoder->crtc_mask = (1 << 1);
@@ -1157,14 +1159,14 @@ void intel_lvds_init(struct drm_device *dev)
 	 */
 
 	/* Ironlake: FIXME if still fail, not try pipe mode now */
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		goto failed;
 
 	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
-	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
 	if (crtc && (lvds & LVDS_PORT_EN)) {
-		fixed_mode = intel_crtc_mode_get(dev, crtc);
+		fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
 		if (fixed_mode) {
 			DRM_DEBUG_KMS("using current (BIOS) mode: ");
 			drm_mode_debug_printmodeline(fixed_mode);
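The intel_lvds.c hunks above follow the tree-wide conversion running through this series: helpers that only need device information stop taking struct drm_device and take struct drm_i915_private directly, which is what INTEL_GEN(), HAS_PCH_SPLIT() and friends now expect. A minimal sketch of the pattern, using stand-in struct layouts rather than the real i915 definitions:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_device { int dummy; };                   /* stand-in */
    struct drm_i915_private { struct drm_device drm; }; /* stand-in */

    /* Resolve a drm_device back to its containing drm_i915_private;
     * the real to_i915() is a container_of() of this shape. */
    static inline struct drm_i915_private *to_i915(struct drm_device *dev)
    {
            return container_of(dev, struct drm_i915_private, drm);
    }

    /* Before: every helper re-derived dev_priv from dev. */
    int helper_old(struct drm_device *dev)
    {
            struct drm_i915_private *dev_priv = to_i915(dev);
            return dev_priv->drm.dummy;
    }

    /* After: the caller resolves dev_priv once and passes it down. */
    int helper_new(struct drm_i915_private *dev_priv)
    {
            return dev_priv->drm.dummy;
    }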
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a24bc8c..fd0e4da 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -233,7 +233,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+	struct intel_engine_cs *engine = dev_priv->engine[RCS];
 
 	return i915_gem_request_alloc(engine, dev_priv->kernel_context);
 }
@@ -1222,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	drm_modeset_unlock_all(dev);
-	i915_gem_object_put_unlocked(new_bo);
+	i915_gem_object_put(new_bo);
 out_free:
 	kfree(params);
 
@@ -1466,10 +1466,12 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 	 * hardware should be off already */
 	WARN_ON(dev_priv->overlay->active);
 
-	i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
+	i915_gem_object_put(dev_priv->overlay->reg_bo);
 	kfree(dev_priv->overlay);
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
 struct intel_overlay_error_state {
 	struct overlay_registers regs;
 	unsigned long base;
@@ -1587,3 +1589,5 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
 	P(UVSCALEV);
 #undef P
 }
+
+#endif
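Guarding the overlay error-state code with IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) compiles it out entirely when error capture is configured off. A simplified sketch of the mechanism (the real IS_ENABLED() in <linux/kconfig.h> is more elaborate; treat the macro below as an illustration only):

    /* Simplified stand-in for <linux/kconfig.h>: with the option set,
     * IS_ENABLED() evaluates to 1 at preprocessing time and the guarded
     * region is compiled in; with it unset, none of the error-state
     * structs or printers land in the image at all. */
    #define CONFIG_DRM_I915_CAPTURE_ERROR 1
    #define IS_ENABLED(option) (option)

    #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
    struct overlay_error_state { unsigned long base; };

    int capture_overlay_error_state(struct overlay_error_state *e)
    {
            return e->base != 0;
    }
    #endif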
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a2f751c..cc9e0c0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,7 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
+#include <drm/drm_atomic_helper.h>
 
 /**
  * DOC: RC6
@@ -55,10 +56,8 @@
 #define INTEL_RC6p_ENABLE			(1<<1)
 #define INTEL_RC6pp_ENABLE			(1<<2)
 
-static void gen9_init_clock_gating(struct drm_device *dev)
+static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
 	I915_WRITE(CHICKEN_PAR1_1,
 		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
@@ -81,11 +80,9 @@ static void gen9_init_clock_gating(struct drm_device *dev)
 		   ILK_DPFC_DISABLE_DUMMY0);
 }
 
-static void bxt_init_clock_gating(struct drm_device *dev)
+static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	gen9_init_clock_gating(dev);
+	gen9_init_clock_gating(dev_priv);
 
 	/* WaDisableSDEUnitClockGating:bxt */
 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
@@ -107,9 +104,8 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 			   PWM1_GATING_DIS | PWM2_GATING_DIS);
 }
 
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp;
 
 	tmp = I915_READ(CLKCFG);
@@ -146,9 +142,8 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
 	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
 }
 
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 ddrpll, csipll;
 
 	ddrpll = I915_READ16(DDRMPLL1);
@@ -252,8 +247,8 @@ static const struct cxsr_latency cxsr_latency_table[] = {
 	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
 };
 
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
-							 int is_ddr3,
+static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
+							 bool is_ddr3,
 							 int fsb,
 							 int mem)
 {
@@ -319,27 +314,26 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 
 void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	u32 val;
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
 		POSTING_READ(FW_BLC_SELF_VLV);
 		dev_priv->wm.vlv.cxsr = enable;
-	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+	} else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
 		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
 		POSTING_READ(FW_BLC_SELF);
-	} else if (IS_PINEVIEW(dev)) {
+	} else if (IS_PINEVIEW(dev_priv)) {
 		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
 		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
 		I915_WRITE(DSPFW3, val);
 		POSTING_READ(DSPFW3);
-	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
+	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
 		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
 			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
 		I915_WRITE(FW_BLC_SELF, val);
 		POSTING_READ(FW_BLC_SELF);
-	} else if (IS_I915GM(dev)) {
+	} else if (IS_I915GM(dev_priv)) {
 		/*
 		 * FIXME can't find a bit like this for 915G, and
 		 * yet it does have the related watermark in
@@ -377,10 +371,9 @@ static const int pessimal_latency_ns = 5000;
 #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
 	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
 
-static int vlv_get_fifo_size(struct drm_device *dev,
+static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
 			      enum pipe pipe, int plane)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int sprite0_start, sprite1_start, size;
 
 	switch (pipe) {
@@ -429,9 +422,8 @@ static int vlv_get_fifo_size(struct drm_device *dev,
 	return size;
 }
 
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -445,9 +437,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 	return size;
 }
 
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -462,9 +453,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 	return size;
 }
 
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -624,11 +614,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
 	return wm_size;
 }
 
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
 {
-	struct drm_crtc *crtc, *enabled = NULL;
+	struct intel_crtc *crtc, *enabled = NULL;
 
-	for_each_crtc(dev, crtc) {
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		if (intel_crtc_active(crtc)) {
 			if (enabled)
 				return NULL;
@@ -639,27 +629,31 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 	return enabled;
 }
 
-static void pineview_update_wm(struct drm_crtc *unused_crtc)
+static void pineview_update_wm(struct intel_crtc *unused_crtc)
 {
-	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc;
+	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+	struct intel_crtc *crtc;
 	const struct cxsr_latency *latency;
 	u32 reg;
 	unsigned long wm;
 
-	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
-					 dev_priv->fsb_freq, dev_priv->mem_freq);
+	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+					 dev_priv->is_ddr3,
+					 dev_priv->fsb_freq,
+					 dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
 		intel_set_memory_cxsr(dev_priv, false);
 		return;
 	}
 
-	crtc = single_enabled_crtc(dev);
+	crtc = single_enabled_crtc(dev_priv);
 	if (crtc) {
-		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
-		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+		const struct drm_display_mode *adjusted_mode =
+			&crtc->config->base.adjusted_mode;
+		const struct drm_framebuffer *fb =
+			crtc->base.primary->state->fb;
+		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 		int clock = adjusted_mode->crtc_clock;
 
 		/* Display SR */
@@ -706,7 +700,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
 	}
 }
 
-static bool g4x_compute_wm0(struct drm_device *dev,
+static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
 			    int plane,
 			    const struct intel_watermark_params *display,
 			    int display_latency_ns,
@@ -715,24 +709,26 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 			    int *plane_wm,
 			    int *cursor_wm)
 {
-	struct drm_crtc *crtc;
+	struct intel_crtc *crtc;
 	const struct drm_display_mode *adjusted_mode;
+	const struct drm_framebuffer *fb;
 	int htotal, hdisplay, clock, cpp;
 	int line_time_us, line_count;
 	int entries, tlb_miss;
 
-	crtc = intel_get_crtc_for_plane(dev, plane);
+	crtc = intel_get_crtc_for_plane(dev_priv, plane);
 	if (!intel_crtc_active(crtc)) {
 		*cursor_wm = cursor->guard_size;
 		*plane_wm = display->guard_size;
 		return false;
 	}
 
-	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+	adjusted_mode = &crtc->config->base.adjusted_mode;
+	fb = crtc->base.primary->state->fb;
 	clock = adjusted_mode->crtc_clock;
 	htotal = adjusted_mode->crtc_htotal;
-	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+	hdisplay = crtc->config->pipe_src_w;
+	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
 	/* Use the small buffer method to calculate plane watermark */
 	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
@@ -747,7 +743,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 	/* Use the large buffer method to calculate cursor watermark */
 	line_time_us = max(htotal * 1000 / clock, 1);
 	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
-	entries = line_count * crtc->cursor->state->crtc_w * cpp;
+	entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
 	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
 	if (tlb_miss > 0)
 		entries += tlb_miss;
@@ -766,7 +762,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
  * can be programmed into the associated watermark register, that watermark
  * must be disabled.
  */
-static bool g4x_check_srwm(struct drm_device *dev,
+static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
 			   int display_wm, int cursor_wm,
 			   const struct intel_watermark_params *display,
 			   const struct intel_watermark_params *cursor)
@@ -775,13 +771,13 @@ static bool g4x_check_srwm(struct drm_device *dev,
 		      display_wm, cursor_wm);
 
 	if (display_wm > display->max_wm) {
-		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+		DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
 			      display_wm, display->max_wm);
 		return false;
 	}
 
 	if (cursor_wm > cursor->max_wm) {
-		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+		DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
 			      cursor_wm, cursor->max_wm);
 		return false;
 	}
@@ -794,15 +790,16 @@ static bool g4x_check_srwm(struct drm_device *dev,
 	return true;
 }
 
-static bool g4x_compute_srwm(struct drm_device *dev,
+static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
 			     int plane,
 			     int latency_ns,
 			     const struct intel_watermark_params *display,
 			     const struct intel_watermark_params *cursor,
 			     int *display_wm, int *cursor_wm)
 {
-	struct drm_crtc *crtc;
+	struct intel_crtc *crtc;
 	const struct drm_display_mode *adjusted_mode;
+	const struct drm_framebuffer *fb;
 	int hdisplay, htotal, cpp, clock;
 	unsigned long line_time_us;
 	int line_count, line_size;
@@ -814,12 +811,13 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 		return false;
 	}
 
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+	crtc = intel_get_crtc_for_plane(dev_priv, plane);
+	adjusted_mode = &crtc->config->base.adjusted_mode;
+	fb = crtc->base.primary->state->fb;
 	clock = adjusted_mode->crtc_clock;
 	htotal = adjusted_mode->crtc_htotal;
-	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+	hdisplay = crtc->config->pipe_src_w;
+	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
 	line_time_us = max(htotal * 1000 / clock, 1);
 	line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -833,11 +831,11 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 	*display_wm = entries + display->guard_size;
 
 	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * cpp * crtc->cursor->state->crtc_w;
+	entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
 	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
 	*cursor_wm = entries + cursor->guard_size;
 
-	return g4x_check_srwm(dev,
+	return g4x_check_srwm(dev_priv,
 			      *display_wm, *cursor_wm,
 			      display, cursor);
 }
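The self-refresh watermark computed by g4x_compute_srwm() turns a latency budget into FIFO entries: whole microseconds per scanline, scanlines of latency rounded up to the next full line, then bytes fetched over those lines, rounded up to cachelines. A standalone worked example with assumed mode parameters (not values read from hardware):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define max(a, b) ((a) > (b) ? (a) : (b))

    /* Worked example of the self-refresh cursor watermark above, using
     * assumed numbers: a 1920x1080@60 mode, 12000 ns SR latency,
     * 4 bytes/pixel, a 64-pixel-wide cursor, 64-byte cachelines and a
     * guard size of 2. */
    int main(void)
    {
            int htotal = 2200, clock = 148500;      /* kHz */
            int latency_ns = 12000, cpp = 4;
            int cursor_w = 64, cacheline = 64, guard = 2;

            /* Whole microseconds per scanline, clamped to at least 1. */
            int line_time_us = max(htotal * 1000 / clock, 1);
            /* Scanlines of latency, rounded up to the next full line. */
            int line_count = (latency_ns / line_time_us + 1000) / 1000;
            /* FIFO entries needed to hide that latency for the cursor. */
            int entries = line_count * cpp * cursor_w;

            entries = DIV_ROUND_UP(entries, cacheline);
            printf("cursor SR watermark = %d\n", entries + guard); /* 6 */
            return 0;
    }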
@@ -937,10 +935,8 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 	return ret;
 }
 
-static void vlv_setup_wm_latency(struct drm_device *dev)
+static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/* all latencies in usec */
 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
 
@@ -1327,20 +1323,19 @@ static void vlv_merge_wm(struct drm_device *dev,
 	}
 }
 
-static void vlv_update_wm(struct drm_crtc *crtc)
+static void vlv_update_wm(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	enum pipe pipe = crtc->pipe;
 	struct vlv_wm_values wm = {};
 
-	vlv_compute_wm(intel_crtc);
+	vlv_compute_wm(crtc);
 	vlv_merge_wm(dev, &wm);
 
 	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
 		/* FIXME should be part of crtc atomic commit */
-		vlv_pipe_set_fifo_size(intel_crtc);
+		vlv_pipe_set_fifo_size(crtc);
 		return;
 	}
 
@@ -1356,9 +1351,9 @@ static void vlv_update_wm(struct drm_crtc *crtc)
 		intel_set_memory_cxsr(dev_priv, false);
 
 	/* FIXME should be part of crtc atomic commit */
-	vlv_pipe_set_fifo_size(intel_crtc);
+	vlv_pipe_set_fifo_size(crtc);
 
-	vlv_write_wm_values(intel_crtc, &wm);
+	vlv_write_wm_values(crtc, &wm);
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
 		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
@@ -1382,30 +1377,29 @@ static void vlv_update_wm(struct drm_crtc *crtc)
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
 
-static void g4x_update_wm(struct drm_crtc *crtc)
+static void g4x_update_wm(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	static const int sr_latency_ns = 12000;
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
 	bool cxsr_enabled;
 
-	if (g4x_compute_wm0(dev, PIPE_A,
+	if (g4x_compute_wm0(dev_priv, PIPE_A,
 			    &g4x_wm_info, pessimal_latency_ns,
 			    &g4x_cursor_wm_info, pessimal_latency_ns,
 			    &planea_wm, &cursora_wm))
 		enabled |= 1 << PIPE_A;
 
-	if (g4x_compute_wm0(dev, PIPE_B,
+	if (g4x_compute_wm0(dev_priv, PIPE_B,
 			    &g4x_wm_info, pessimal_latency_ns,
 			    &g4x_cursor_wm_info, pessimal_latency_ns,
 			    &planeb_wm, &cursorb_wm))
 		enabled |= 1 << PIPE_B;
 
 	if (single_plane_enabled(enabled) &&
-	    g4x_compute_srwm(dev, ffs(enabled) - 1,
+	    g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
 			     sr_latency_ns,
 			     &g4x_wm_info,
 			     &g4x_cursor_wm_info,
@@ -1440,25 +1434,27 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 		intel_set_memory_cxsr(dev_priv, true);
 }
 
-static void i965_update_wm(struct drm_crtc *unused_crtc)
+static void i965_update_wm(struct intel_crtc *unused_crtc)
 {
-	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc;
+	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+	struct intel_crtc *crtc;
 	int srwm = 1;
 	int cursor_sr = 16;
 	bool cxsr_enabled;
 
 	/* Calc sr entries for one plane configs */
-	crtc = single_enabled_crtc(dev);
+	crtc = single_enabled_crtc(dev_priv);
 	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
-		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+		const struct drm_display_mode *adjusted_mode =
+			&crtc->config->base.adjusted_mode;
+		const struct drm_framebuffer *fb =
+			crtc->base.primary->state->fb;
 		int clock = adjusted_mode->crtc_clock;
 		int htotal = adjusted_mode->crtc_htotal;
-		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+		int hdisplay = crtc->config->pipe_src_w;
+		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 		unsigned long line_time_us;
 		int entries;
 
@@ -1476,7 +1472,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 			      entries, srwm);
 
 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			cpp * crtc->cursor->state->crtc_w;
+			cpp * crtc->base.cursor->state->crtc_w;
 		entries = DIV_ROUND_UP(entries,
 					  i965_cursor_wm_info.cacheline_size);
 		cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1514,34 +1510,38 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 
 #undef FW_WM
 
-static void i9xx_update_wm(struct drm_crtc *unused_crtc)
+static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 {
-	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
 	uint32_t fwater_hi;
 	int cwm, srwm = 1;
 	int fifo_size;
 	int planea_wm, planeb_wm;
-	struct drm_crtc *crtc, *enabled = NULL;
+	struct intel_crtc *crtc, *enabled = NULL;
 
-	if (IS_I945GM(dev))
+	if (IS_I945GM(dev_priv))
 		wm_info = &i945_wm_info;
-	else if (!IS_GEN2(dev))
+	else if (!IS_GEN2(dev_priv))
 		wm_info = &i915_wm_info;
 	else
 		wm_info = &i830_a_wm_info;
 
-	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
-	crtc = intel_get_crtc_for_plane(dev, 0);
+	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
+	crtc = intel_get_crtc_for_plane(dev_priv, 0);
 	if (intel_crtc_active(crtc)) {
-		const struct drm_display_mode *adjusted_mode;
-		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
-		if (IS_GEN2(dev))
-			cpp = 4;
+		const struct drm_display_mode *adjusted_mode =
+			&crtc->config->base.adjusted_mode;
+		const struct drm_framebuffer *fb =
+			crtc->base.primary->state->fb;
+		int cpp;
 
-		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+		if (IS_GEN2(dev_priv))
+			cpp = 4;
+		else
+			cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
 		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       pessimal_latency_ns);
@@ -1552,18 +1552,23 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 			planea_wm = wm_info->max_wm;
 	}
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		wm_info = &i830_bc_wm_info;
 
-	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
-	crtc = intel_get_crtc_for_plane(dev, 1);
+	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
+	crtc = intel_get_crtc_for_plane(dev_priv, 1);
 	if (intel_crtc_active(crtc)) {
-		const struct drm_display_mode *adjusted_mode;
-		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
-		if (IS_GEN2(dev))
-			cpp = 4;
+		const struct drm_display_mode *adjusted_mode =
+			&crtc->config->base.adjusted_mode;
+		const struct drm_framebuffer *fb =
+			crtc->base.primary->state->fb;
+		int cpp;
 
-		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+		if (IS_GEN2(dev_priv))
+			cpp = 4;
+		else
+			cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
 		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       pessimal_latency_ns);
@@ -1579,10 +1584,10 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 
 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
-	if (IS_I915GM(dev) && enabled) {
+	if (IS_I915GM(dev_priv) && enabled) {
 		struct drm_i915_gem_object *obj;
 
-		obj = intel_fb_obj(enabled->primary->state->fb);
+		obj = intel_fb_obj(enabled->base.primary->state->fb);
 
 		/* self-refresh seems busted with untiled */
 		if (!i915_gem_object_is_tiled(obj))
@@ -1598,19 +1603,24 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	intel_set_memory_cxsr(dev_priv, false);
 
 	/* Calc sr entries for one plane configs */
-	if (HAS_FW_BLC(dev) && enabled) {
+	if (HAS_FW_BLC(dev_priv) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
-		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
+		const struct drm_display_mode *adjusted_mode =
+			&enabled->config->base.adjusted_mode;
+		const struct drm_framebuffer *fb =
+			enabled->base.primary->state->fb;
 		int clock = adjusted_mode->crtc_clock;
 		int htotal = adjusted_mode->crtc_htotal;
-		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
-		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
+		int hdisplay = enabled->config->pipe_src_w;
+		int cpp;
 		unsigned long line_time_us;
 		int entries;
 
-		if (IS_I915GM(dev) || IS_I945GM(dev))
+		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
 			cpp = 4;
+		else
+			cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
 		line_time_us = max(htotal * 1000 / clock, 1);
 
@@ -1623,7 +1633,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 		if (srwm < 0)
 			srwm = 1;
 
-		if (IS_I945G(dev) || IS_I945GM(dev))
+		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
 			I915_WRITE(FW_BLC_SELF,
 				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
 		else
@@ -1647,23 +1657,22 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 		intel_set_memory_cxsr(dev_priv, true);
 }
 
-static void i845_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct intel_crtc *unused_crtc)
 {
-	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc;
+	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+	struct intel_crtc *crtc;
 	const struct drm_display_mode *adjusted_mode;
 	uint32_t fwater_lo;
 	int planea_wm;
 
-	crtc = single_enabled_crtc(dev);
+	crtc = single_enabled_crtc(dev_priv);
 	if (crtc == NULL)
 		return;
 
-	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+	adjusted_mode = &crtc->config->base.adjusted_mode;
 	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 				       &i845_wm_info,
-				       dev_priv->display.get_fifo_size(dev, 0),
+				       dev_priv->display.get_fifo_size(dev_priv, 0),
 				       4, pessimal_latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
 	fwater_lo |= (3<<8) | planea_wm;
@@ -2076,14 +2085,13 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 	       PIPE_WM_LINETIME_TIME(linetime);
 }
 
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
+static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+				  uint16_t wm[8])
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	if (IS_GEN9(dev)) {
+	if (IS_GEN9(dev_priv)) {
 		uint32_t val;
 		int ret, i;
-		int level, max_level = ilk_wm_max_level(dev);
+		int level, max_level = ilk_wm_max_level(dev_priv);
 
 		/* read the first set of memory latencies[0:3] */
 		val = 0; /* data0 to be programmed to 0 for first set */
@@ -2155,7 +2163,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 			}
 		}
 
-	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
 		wm[0] = (sskpd >> 56) & 0xFF;
@@ -2165,14 +2173,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 		wm[2] = (sskpd >> 12) & 0xFF;
 		wm[3] = (sskpd >> 20) & 0x1FF;
 		wm[4] = (sskpd >> 32) & 0x1FF;
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (INTEL_GEN(dev_priv) >= 6) {
 		uint32_t sskpd = I915_READ(MCH_SSKPD);
 
 		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
 		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
 		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
 		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
-	} else if (INTEL_INFO(dev)->gen >= 5) {
+	} else if (INTEL_GEN(dev_priv) >= 5) {
 		uint32_t mltr = I915_READ(MLTR_ILK);
 
 		/* ILK primary LP0 latency is 700 ns */
@@ -2182,42 +2190,44 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 	}
 }
 
-static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+				       uint16_t wm[5])
 {
 	/* ILK sprite LP0 latency is 1300 ns */
-	if (IS_GEN5(dev))
+	if (IS_GEN5(dev_priv))
 		wm[0] = 13;
 }
 
-static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+				       uint16_t wm[5])
 {
 	/* ILK cursor LP0 latency is 1300 ns */
-	if (IS_GEN5(dev))
+	if (IS_GEN5(dev_priv))
 		wm[0] = 13;
 
 	/* WaDoubleCursorLP3Latency:ivb */
-	if (IS_IVYBRIDGE(dev))
+	if (IS_IVYBRIDGE(dev_priv))
 		wm[3] *= 2;
 }
 
-int ilk_wm_max_level(const struct drm_device *dev)
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
 {
 	/* how many WM levels are we expecting */
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		return 7;
-	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		return 4;
-	else if (INTEL_INFO(dev)->gen >= 6)
+	else if (INTEL_GEN(dev_priv) >= 6)
 		return 3;
 	else
 		return 2;
 }
 
-static void intel_print_wm_latency(struct drm_device *dev,
+static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 				   const char *name,
 				   const uint16_t wm[8])
 {
-	int level, max_level = ilk_wm_max_level(dev);
+	int level, max_level = ilk_wm_max_level(dev_priv);
 
 	for (level = 0; level <= max_level; level++) {
 		unsigned int latency = wm[level];
@@ -2232,7 +2242,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
 		 * - latencies are in us on gen9.
 		 * - before then, WM1+ latency values are in 0.5us units
 		 */
-		if (IS_GEN9(dev))
+		if (IS_GEN9(dev_priv))
 			latency *= 10;
 		else if (level > 0)
 			latency *= 5;
@@ -2246,7 +2256,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 				    uint16_t wm[5], uint16_t min)
 {
-	int level, max_level = ilk_wm_max_level(&dev_priv->drm);
+	int level, max_level = ilk_wm_max_level(dev_priv);
 
 	if (wm[0] >= min)
 		return false;
@@ -2258,9 +2268,8 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 	return true;
 }
 
-static void snb_wm_latency_quirk(struct drm_device *dev)
+static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool changed;
 
 	/*
@@ -2275,39 +2284,35 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
 		return;
 
 	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
-	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
-	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
-	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
-static void ilk_setup_wm_latency(struct drm_device *dev)
+static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 
-	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
-	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
 
-	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
-	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
-	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 
-	if (IS_GEN6(dev))
-		snb_wm_latency_quirk(dev);
+	if (IS_GEN6(dev_priv))
+		snb_wm_latency_quirk(dev_priv);
 }
 
-static void skl_setup_wm_latency(struct drm_device *dev)
+static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
-	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
+	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
 }
 
 static bool ilk_validate_pipe_wm(struct drm_device *dev,
@@ -2345,7 +2350,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	struct intel_plane_state *pristate = NULL;
 	struct intel_plane_state *sprstate = NULL;
 	struct intel_plane_state *curstate = NULL;
-	int level, max_level = ilk_wm_max_level(dev), usable_level;
+	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
 	struct ilk_wm_maximums max;
 
 	pipe_wm = &cstate->wm.ilk.optimal;
@@ -2390,7 +2395,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
 	pipe_wm->wm[0] = pipe_wm->raw_wm[0];
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
 
 	if (!ilk_validate_pipe_wm(dev, pipe_wm))
@@ -2432,7 +2437,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
 {
 	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
 	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
-	int level, max_level = ilk_wm_max_level(dev);
+	int level, max_level = ilk_wm_max_level(to_i915(dev));
 
 	/*
 	 * Start with the final, target watermarks, then combine with the
@@ -2516,11 +2521,11 @@ static void ilk_wm_merge(struct drm_device *dev,
 			 struct intel_pipe_wm *merged)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev);
+	int level, max_level = ilk_wm_max_level(dev_priv);
 	int last_enabled_level = max_level;
 
 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
-	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
 	    config->num_pipes_active > 1)
 		last_enabled_level = 0;
 
@@ -2556,7 +2561,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 	 * What we should check here is whether FBC can be
 	 * enabled sometime later.
 	 */
-	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+	if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
 	    intel_fbc_is_active(dev_priv)) {
 		for (level = 2; level <= max_level; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
@@ -2577,7 +2582,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		return 2 * level;
 	else
 		return dev_priv->wm.pri_latency[level];
@@ -2656,7 +2661,7 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
 						  struct intel_pipe_wm *r1,
 						  struct intel_pipe_wm *r2)
 {
-	int level, max_level = ilk_wm_max_level(dev);
+	int level, max_level = ilk_wm_max_level(to_i915(dev));
 	int level1 = 0, level2 = 0;
 
 	for (level = 1; level <= max_level; level++) {
@@ -2801,7 +2806,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
 	if (dirty & WM_DIRTY_DDB) {
-		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 			val = I915_READ(WM_MISC);
 			if (results->partitioning == INTEL_DDB_PART_1_2)
 				val &= ~WM_MISC_DATA_PARTITION_5_6;
@@ -2879,6 +2884,21 @@ skl_wm_plane_id(const struct intel_plane *plane)
 	}
 }
 
+/*
+ * FIXME: We still don't have the proper code to detect if we need to apply the WA,
+ * so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+	    IS_KABYLAKE(dev_priv))
+		return true;
+
+	return false;
+}
+
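Callers of skl_needs_memory_bw_wa() later in this patch apply the workaround by padding the latency used in the watermark math for X-tiled framebuffers. A hedged sketch of that adjustment (the modifier constant below is a stand-in for illustration, not the real fourcc modifier code):

    #include <stdbool.h>
    #include <stdint.h>

    #define I915_FORMAT_MOD_X_TILED 1ULL    /* stand-in value */

    /* Mirrors the uses of the workaround later in this patch: X-tiled
     * scanout pays an extra 15 us of assumed latency so the computed
     * watermarks stay above the underrun threshold. */
    uint32_t wa_adjusted_latency(uint32_t latency_us,
                                 bool needs_memory_bw_wa,
                                 uint64_t fb_modifier)
    {
            if (needs_memory_bw_wa &&
                fb_modifier == I915_FORMAT_MOD_X_TILED)
                    latency_us += 15;
            return latency_us;
    }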
 static bool
 intel_has_sagv(struct drm_i915_private *dev_priv)
 {
@@ -2999,9 +3019,12 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_crtc *crtc;
+	struct intel_crtc *crtc;
+	struct intel_plane *plane;
+	struct intel_crtc_state *cstate;
+	struct skl_plane_wm *wm;
 	enum pipe pipe;
-	int level, plane;
+	int level, latency;
 
 	if (!intel_has_sagv(dev_priv))
 		return false;
@@ -3019,27 +3042,37 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 
 	/* Since we're now guaranteed to only have one active CRTC... */
 	pipe = ffs(intel_state->active_crtcs) - 1;
-	crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	cstate = to_intel_crtc_state(crtc->base.state);
 
-	if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+	if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		return false;
 
-	for_each_plane(dev_priv, pipe, plane) {
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];
+
 		/* Skip this plane if it's not enabled */
-		if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+		if (!wm->wm[0].plane_en)
 			continue;
 
 		/* Find the highest enabled wm level for this plane */
-		for (level = ilk_wm_max_level(dev);
-		     intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+		for (level = ilk_wm_max_level(dev_priv);
+		     !wm->wm[level].plane_en; --level)
 		     { }
 
+		latency = dev_priv->wm.skl_latency[level];
+
+		if (skl_needs_memory_bw_wa(intel_state) &&
+		    plane->base.state->fb->modifier[0] ==
+		    I915_FORMAT_MOD_X_TILED)
+			latency += 15;
+
 		/*
 		 * If any of the planes on this pipe don't enable wm levels
 		 * that incur memory latencies higher than 30µs we can't enable
 		 * the SAGV
 		 */
-		if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+		if (latency < SKL_SAGV_BLOCK_TIME)
 			return false;
 	}
 
@@ -3058,7 +3091,6 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 	struct drm_crtc *for_crtc = cstate->base.crtc;
 	unsigned int pipe_size, ddb_size;
 	int nth_active_pipe;
-	int pipe = to_intel_crtc(for_crtc)->pipe;
 
 	if (WARN_ON(!state) || !cstate->base.active) {
 		alloc->start = 0;
@@ -3086,7 +3118,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 	 * we currently hold.
 	 */
 	if (!intel_state->active_pipe_changes) {
-		*alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+		*alloc = to_intel_crtc(for_crtc)->hw_ddb;
 		return;
 	}
 
@@ -3129,7 +3161,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 			continue;
 
-		for_each_plane(dev_priv, pipe, plane) {
+		for_each_universal_plane(dev_priv, pipe, plane) {
 			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
 			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
 						   val);
@@ -3173,7 +3205,7 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate)
 	src_h = drm_rect_height(&pstate->base.src);
 	dst_w = drm_rect_width(&pstate->base.dst);
 	dst_h = drm_rect_height(&pstate->base.dst);
-	if (intel_rotation_90_or_270(pstate->base.rotation))
+	if (drm_rotation_90_or_270(pstate->base.rotation))
 		swap(dst_w, dst_h);
 
 	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@@ -3204,7 +3236,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-	if (intel_rotation_90_or_270(pstate->rotation))
+	if (drm_rotation_90_or_270(pstate->rotation))
 		swap(width, height);
 
 	/* for planar format */
@@ -3231,49 +3263,39 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
  *   3 * 4096 * 8192  * 4 < 2^32
  */
 static unsigned int
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+				 unsigned *plane_data_rate,
+				 unsigned *plane_y_data_rate)
 {
 	struct drm_crtc_state *cstate = &intel_cstate->base;
 	struct drm_atomic_state *state = cstate->state;
-	struct drm_crtc *crtc = cstate->crtc;
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	const struct drm_plane *plane;
+	struct drm_plane *plane;
 	const struct intel_plane *intel_plane;
-	struct drm_plane_state *pstate;
+	const struct drm_plane_state *pstate;
 	unsigned int rate, total_data_rate = 0;
 	int id;
-	int i;
 
 	if (WARN_ON(!state))
 		return 0;
 
 	/* Calculate and cache data rate for each plane */
-	for_each_plane_in_state(state, plane, pstate, i) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
 		id = skl_wm_plane_id(to_intel_plane(plane));
 		intel_plane = to_intel_plane(plane);
 
-		if (intel_plane->pipe != intel_crtc->pipe)
-			continue;
-
 		/* packed/uv */
 		rate = skl_plane_relative_data_rate(intel_cstate,
 						    pstate, 0);
-		intel_cstate->wm.skl.plane_data_rate[id] = rate;
+		plane_data_rate[id] = rate;
+
+		total_data_rate += rate;
 
 		/* y-plane */
 		rate = skl_plane_relative_data_rate(intel_cstate,
 						    pstate, 1);
-		intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
-	}
+		plane_y_data_rate[id] = rate;
 
-	/* Calculate CRTC's total data rate from cached values */
-	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-		int id = skl_wm_plane_id(intel_plane);
-
-		/* packed/uv */
-		total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
-		total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
+		total_data_rate += rate;
 	}
 
 	return total_data_rate;
@@ -3304,7 +3326,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
 	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-	if (intel_rotation_90_or_270(pstate->rotation))
+	if (drm_rotation_90_or_270(pstate->rotation))
 		swap(src_w, src_h);
 
 	/* Halve UV plane width and height for NV12 */
@@ -3318,7 +3340,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 	else
 		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
-	if (intel_rotation_90_or_270(pstate->rotation)) {
+	if (drm_rotation_90_or_270(pstate->rotation)) {
 		switch (plane_bpp) {
 		case 1:
 			min_scanlines = 32;
@@ -3342,6 +3364,30 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
 }
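skl_ddb_min_alloc() reduces to the closing formula: bytes for four scanlines, divided into 512-byte DDB blocks, scaled by the rotation-dependent minimum scanline count, plus 3 blocks of slack. A standalone worked example with assumed plane parameters:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Worked example of the minimum-DDB formula above for an assumed
     * 1920-wide, 4 bpp, unrotated plane (min_scanlines = 4). The 512
     * is the DDB block size in bytes; the +3 blocks of slack come from
     * the driver's formula, not from this sketch. */
    int main(void)
    {
            int src_w = 1920, plane_bpp = 4, min_scanlines = 4;
            int blocks = DIV_ROUND_UP(4 * src_w * plane_bpp, 512) *
                         min_scanlines / 4 + 3;
            printf("minimum DDB allocation = %d blocks\n", blocks); /* 63 */
            return 0;
    }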
 
+static void
+skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
+		 uint16_t *minimum, uint16_t *y_minimum)
+{
+	const struct drm_plane_state *pstate;
+	struct drm_plane *plane;
+
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+		int id = skl_wm_plane_id(intel_plane);
+
+		if (id == PLANE_CURSOR)
+			continue;
+
+		if (!pstate->visible)
+			continue;
+
+		minimum[id] = skl_ddb_min_alloc(pstate, 0);
+		y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+	}
+
+	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+}
+
 static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		      struct skl_ddb_allocation *ddb /* out */)
@@ -3350,25 +3396,26 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	struct drm_crtc *crtc = cstate->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane;
-	struct drm_plane *plane;
-	struct drm_plane_state *pstate;
 	enum pipe pipe = intel_crtc->pipe;
-	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
-	uint16_t alloc_size, start, cursor_blocks;
-	uint16_t *minimum = cstate->wm.skl.minimum_blocks;
-	uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
+	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
+	uint16_t alloc_size, start;
+	uint16_t minimum[I915_MAX_PLANES] = {};
+	uint16_t y_minimum[I915_MAX_PLANES] = {};
 	unsigned int total_data_rate;
 	int num_active;
 	int id, i;
+	unsigned plane_data_rate[I915_MAX_PLANES] = {};
+	unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
+
+	/* Clear the partitioning for disabled planes. */
+	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+	memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
 
 	if (WARN_ON(!state))
 		return 0;
 
 	if (!cstate->base.active) {
-		ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
-		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+		alloc->start = alloc->end = 0;
 		return 0;
 	}
 
@@ -3379,57 +3426,43 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		return 0;
 	}
 
-	cursor_blocks = skl_cursor_allocation(num_active);
-	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
-	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+	skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
 
-	alloc_size -= cursor_blocks;
+	/*
+	 * 1. Allocate the minimum required blocks for each active plane
+	 * and allocate the cursor; it doesn't require an extra allocation
+	 * proportional to the data rate.
+	 */
 
-	/* 1. Allocate the mininum required blocks for each active plane */
-	for_each_plane_in_state(state, plane, pstate, i) {
-		intel_plane = to_intel_plane(plane);
-		id = skl_wm_plane_id(intel_plane);
-
-		if (intel_plane->pipe != pipe)
-			continue;
-
-		if (!to_intel_plane_state(pstate)->base.visible) {
-			minimum[id] = 0;
-			y_minimum[id] = 0;
-			continue;
-		}
-		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
-			minimum[id] = 0;
-			y_minimum[id] = 0;
-			continue;
-		}
-
-		minimum[id] = skl_ddb_min_alloc(pstate, 0);
-		y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
-	}
-
-	for (i = 0; i < PLANE_CURSOR; i++) {
+	for (i = 0; i < I915_MAX_PLANES; i++) {
 		alloc_size -= minimum[i];
 		alloc_size -= y_minimum[i];
 	}
 
+	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+
 	/*
 	 * 2. Distribute the remaining space in proportion to the amount of
 	 * data each plane needs to fetch from memory.
 	 *
 	 * FIXME: we may not allocate every single block here.
 	 */
-	total_data_rate = skl_get_total_relative_data_rate(cstate);
+	total_data_rate = skl_get_total_relative_data_rate(cstate,
+							   plane_data_rate,
+							   plane_y_data_rate);
 	if (total_data_rate == 0)
 		return 0;
 
 	start = alloc->start;
-	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+	for (id = 0; id < I915_MAX_PLANES; id++) {
 		unsigned int data_rate, y_data_rate;
 		uint16_t plane_blocks, y_plane_blocks = 0;
-		int id = skl_wm_plane_id(intel_plane);
 
-		data_rate = cstate->wm.skl.plane_data_rate[id];
+		if (id == PLANE_CURSOR)
+			continue;
+
+		data_rate = plane_data_rate[id];
 
 		/*
 		 * allocation for (packed formats) or (uv-plane part of planar format):
@@ -3451,7 +3484,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		/*
 		 * allocation for y_plane part of planar format:
 		 */
-		y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
+		y_data_rate = plane_y_data_rate[id];
 
 		y_plane_blocks = y_minimum[id];
 		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
@@ -3468,12 +3501,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	return 0;
 }
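Step 2 of skl_allocate_pipe_ddb() hands out whatever remains after the fixed minimums in proportion to each plane's relative data rate. A standalone sketch of that distribution with made-up rates and block counts (div_u64() modeled as plain 64-bit division):

    #include <stdio.h>
    #include <stdint.h>

    /* Two planes sharing the leftover DDB space; the minimums are the
     * step 1 results and the data rates are illustrative only. */
    int main(void)
    {
            uint16_t minimum[2] = { 63, 8 };
            unsigned int data_rate[2] = { 8294400, 1036800 };
            unsigned int total = data_rate[0] + data_rate[1];
            uint16_t alloc_size = 320;  /* blocks left after minimums */

            for (int id = 0; id < 2; id++) {
                    uint16_t blocks = minimum[id] +
                            (uint16_t)((uint64_t)alloc_size *
                                       data_rate[id] / total);
                    printf("plane %d: %u blocks\n", id, blocks);
            }
            return 0;   /* plane 0: 347 blocks, plane 1: 43 blocks */
    }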
 
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
-{
-	/* TODO: Take into account the scalers once we support them */
-	return config->base.adjusted_mode.crtc_clock;
-}
-
 /*
  * The max latency should be 257 (max the punit can code is 255 and we add 2us
  * for the read latency) and cpp should always be <= 8, so that
@@ -3524,7 +3551,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
 	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
 	 * with additional adjustments for plane-specific scaling.
 	 */
-	adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+	adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
 	downscale_amount = skl_plane_downscale_amount(pstate);
 
 	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
@@ -3553,22 +3580,28 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	uint32_t width = 0, height = 0;
 	uint32_t plane_pixel_rate;
 	uint32_t y_tile_minimum, y_min_scanlines;
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(cstate->base.state);
+	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
 
 	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
 		*enabled = false;
 		return 0;
 	}
 
+	if (apply_memory_bw_wa && fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+		latency += 15;
+
 	width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-	if (intel_rotation_90_or_270(pstate->rotation))
+	if (drm_rotation_90_or_270(pstate->rotation))
 		swap(width, height);
 
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
 
-	if (intel_rotation_90_or_270(pstate->rotation)) {
+	if (drm_rotation_90_or_270(pstate->rotation)) {
 		int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
 			drm_format_plane_cpp(fb->pixel_format, 1) :
 			drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3580,11 +3613,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		case 2:
 			y_min_scanlines = 8;
 			break;
-		default:
-			WARN(1, "Unsupported pixel depth for rotation");
 		case 4:
 			y_min_scanlines = 4;
 			break;
+		default:
+			MISSING_CASE(cpp);
+			return -EINVAL;
 		}
 	} else {
 		y_min_scanlines = 4;
@@ -3610,12 +3644,17 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				 plane_blocks_per_line);
 
 	y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
+	if (apply_memory_bw_wa)
+		y_tile_minimum *= 2;
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
 		selected_result = max(method2, y_tile_minimum);
 	} else {
-		if ((ddb_allocation / plane_blocks_per_line) >= 1)
+		if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
+		    (plane_bytes_per_line / 512 < 1))
+			selected_result = method2;
+		else if ((ddb_allocation / plane_blocks_per_line) >= 1)
 			selected_result = min(method1, method2);
 		else
 			selected_result = method1;
@@ -3665,67 +3704,52 @@ static int
 skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 		     struct skl_ddb_allocation *ddb,
 		     struct intel_crtc_state *cstate,
+		     struct intel_plane *intel_plane,
 		     int level,
 		     struct skl_wm_level *result)
 {
 	struct drm_atomic_state *state = cstate->base.state;
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
-	struct drm_plane *plane;
-	struct intel_plane *intel_plane;
-	struct intel_plane_state *intel_pstate;
+	struct drm_plane *plane = &intel_plane->base;
+	struct intel_plane_state *intel_pstate = NULL;
 	uint16_t ddb_blocks;
 	enum pipe pipe = intel_crtc->pipe;
 	int ret;
+	int i = skl_wm_plane_id(intel_plane);
+
+	if (state)
+		intel_pstate =
+			intel_atomic_get_existing_plane_state(state,
+							      intel_plane);
 
 	/*
-	 * We'll only calculate watermarks for planes that are actually
-	 * enabled, so make sure all other planes are set as disabled.
+	 * Note: If we start supporting multiple pending atomic commits against
+	 * the same planes/CRTCs in the future, plane->state will no longer be
+	 * the correct pre-state to use for the calculations here and we'll
+	 * need to change where we get the 'unchanged' plane data from.
+	 *
+	 * For now this is fine because we only allow one queued commit against
+	 * a CRTC.  Even if the plane isn't modified by this transaction and we
+	 * don't have a plane lock, we still have the CRTC's lock, so we know
+	 * that no other transactions are racing with us to update it.
 	 */
-	memset(result, 0, sizeof(*result));
+	if (!intel_pstate)
+		intel_pstate = to_intel_plane_state(plane->state);
 
-	for_each_intel_plane_mask(&dev_priv->drm,
-				  intel_plane,
-				  cstate->base.plane_mask) {
-		int i = skl_wm_plane_id(intel_plane);
+	WARN_ON(!intel_pstate->base.fb);
 
-		plane = &intel_plane->base;
-		intel_pstate = NULL;
-		if (state)
-			intel_pstate =
-				intel_atomic_get_existing_plane_state(state,
-								      intel_plane);
+	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
-		/*
-		 * Note: If we start supporting multiple pending atomic commits
-		 * against the same planes/CRTC's in the future, plane->state
-		 * will no longer be the correct pre-state to use for the
-		 * calculations here and we'll need to change where we get the
-		 * 'unchanged' plane data from.
-		 *
-		 * For now this is fine because we only allow one queued commit
-		 * against a CRTC.  Even if the plane isn't modified by this
-		 * transaction and we don't have a plane lock, we still have
-		 * the CRTC's lock, so we know that no other transactions are
-		 * racing with us to update it.
-		 */
-		if (!intel_pstate)
-			intel_pstate = to_intel_plane_state(plane->state);
-
-		WARN_ON(!intel_pstate->base.fb);
-
-		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
-
-		ret = skl_compute_plane_wm(dev_priv,
-					   cstate,
-					   intel_pstate,
-					   ddb_blocks,
-					   level,
-					   &result->plane_res_b[i],
-					   &result->plane_res_l[i],
-					   &result->plane_en[i]);
-		if (ret)
-			return ret;
-	}
+	ret = skl_compute_plane_wm(dev_priv,
+				   cstate,
+				   intel_pstate,
+				   ddb_blocks,
+				   level,
+				   &result->plane_res_b,
+				   &result->plane_res_l,
+				   &result->plane_en);
+	if (ret)
+		return ret;
 
 	return 0;
 }
@@ -3733,32 +3757,28 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 static uint32_t
 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
 {
+	uint32_t pixel_rate;
+
 	if (!cstate->base.active)
 		return 0;
 
-	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
+	pixel_rate = ilk_pipe_pixel_rate(cstate);
+
+	if (WARN_ON(pixel_rate == 0))
 		return 0;
 
 	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
-			    skl_pipe_pixel_rate(cstate));
+			    pixel_rate);
 }
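skl_compute_linetime_wm() now derives the line time from ilk_pipe_pixel_rate(); the factor of 8 scales the result so one unit is an eighth of a microsecond, which appears to be what the linetime watermark field expects. A worked example for an assumed 1920x1080@60 mode:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* htotal = 2200 pixels per line, pixel rate = 148500 kHz, so one
     * line takes about 14.8 us; in 1/8 us units that rounds up to 119. */
    int main(void)
    {
            unsigned int htotal = 2200, pixel_rate = 148500; /* kHz */
            unsigned int linetime = DIV_ROUND_UP(8 * htotal * 1000,
                                                 pixel_rate);
            printf("linetime = %u (1/8 us units)\n", linetime); /* 119 */
            return 0;
    }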
 
 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
 				      struct skl_wm_level *trans_wm /* out */)
 {
-	struct drm_crtc *crtc = cstate->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane;
-
 	if (!cstate->base.active)
 		return;
 
 	/* Until we know more, just disable transition WMs */
-	for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
-		int i = skl_wm_plane_id(intel_plane);
-
-		trans_wm->plane_en[i] = false;
-	}
+	trans_wm->plane_en = false;
 }
 
 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
@@ -3767,79 +3787,36 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 {
 	struct drm_device *dev = cstate->base.crtc->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev);
+	struct intel_plane *intel_plane;
+	struct skl_plane_wm *wm;
+	int level, max_level = ilk_wm_max_level(dev_priv);
 	int ret;
 
-	for (level = 0; level <= max_level; level++) {
-		ret = skl_compute_wm_level(dev_priv, ddb, cstate,
-					   level, &pipe_wm->wm[level]);
-		if (ret)
-			return ret;
+	/*
+	 * We'll only calculate watermarks for planes that are actually
+	 * enabled, so make sure all other planes are set as disabled.
+	 */
+	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
+
+	for_each_intel_plane_mask(&dev_priv->drm,
+				  intel_plane,
+				  cstate->base.plane_mask) {
+		wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];
+
+		for (level = 0; level <= max_level; level++) {
+			ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+						   intel_plane, level,
+						   &wm->wm[level]);
+			if (ret)
+				return ret;
+		}
+		skl_compute_transition_wm(cstate, &wm->trans_wm);
 	}
 	pipe_wm->linetime = skl_compute_linetime_wm(cstate);
 
-	skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
-
 	return 0;
 }
 
-static void skl_compute_wm_results(struct drm_device *dev,
-				   struct skl_pipe_wm *p_wm,
-				   struct skl_wm_values *r,
-				   struct intel_crtc *intel_crtc)
-{
-	int level, max_level = ilk_wm_max_level(dev);
-	enum pipe pipe = intel_crtc->pipe;
-	uint32_t temp;
-	int i;
-
-	for (level = 0; level <= max_level; level++) {
-		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
-			temp = 0;
-
-			temp |= p_wm->wm[level].plane_res_l[i] <<
-					PLANE_WM_LINES_SHIFT;
-			temp |= p_wm->wm[level].plane_res_b[i];
-			if (p_wm->wm[level].plane_en[i])
-				temp |= PLANE_WM_EN;
-
-			r->plane[pipe][i][level] = temp;
-		}
-
-		temp = 0;
-
-		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
-		temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
-
-		if (p_wm->wm[level].plane_en[PLANE_CURSOR])
-			temp |= PLANE_WM_EN;
-
-		r->plane[pipe][PLANE_CURSOR][level] = temp;
-
-	}
-
-	/* transition WMs */
-	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
-		temp = 0;
-		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
-		temp |= p_wm->trans_wm.plane_res_b[i];
-		if (p_wm->trans_wm.plane_en[i])
-			temp |= PLANE_WM_EN;
-
-		r->plane_trans[pipe][i] = temp;
-	}
-
-	temp = 0;
-	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
-	temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
-	if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
-		temp |= PLANE_WM_EN;
-
-	r->plane_trans[pipe][PLANE_CURSOR] = temp;
-
-	r->wm_linetime[pipe] = p_wm->linetime;
-}
-
 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 				i915_reg_t reg,
 				const struct skl_ddb_entry *entry)
@@ -3850,53 +3827,77 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 		I915_WRITE(reg, 0);
 }
 
+static void skl_write_wm_level(struct drm_i915_private *dev_priv,
+			       i915_reg_t reg,
+			       const struct skl_wm_level *level)
+{
+	uint32_t val = 0;
+
+	if (level->plane_en) {
+		val |= PLANE_WM_EN;
+		val |= level->plane_res_b;
+		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
+	}
+
+	I915_WRITE(reg, val);
+}
+
 void skl_write_plane_wm(struct intel_crtc *intel_crtc,
-			const struct skl_wm_values *wm,
+			const struct skl_plane_wm *wm,
+			const struct skl_ddb_allocation *ddb,
 			int plane)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev);
+	int level, max_level = ilk_wm_max_level(dev_priv);
 	enum pipe pipe = intel_crtc->pipe;
 
 	for (level = 0; level <= max_level; level++) {
-		I915_WRITE(PLANE_WM(pipe, plane, level),
-			   wm->plane[pipe][plane][level]);
+		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
+				   &wm->wm[level]);
 	}
-	I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
+	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
+			   &wm->trans_wm);
 
 	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
-			    &wm->ddb.plane[pipe][plane]);
+			    &ddb->plane[pipe][plane]);
 	skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
-			    &wm->ddb.y_plane[pipe][plane]);
+			    &ddb->y_plane[pipe][plane]);
 }
 
 void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
-			 const struct skl_wm_values *wm)
+			 const struct skl_plane_wm *wm,
+			 const struct skl_ddb_allocation *ddb)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev);
+	int level, max_level = ilk_wm_max_level(dev_priv);
 	enum pipe pipe = intel_crtc->pipe;
 
 	for (level = 0; level <= max_level; level++) {
-		I915_WRITE(CUR_WM(pipe, level),
-			   wm->plane[pipe][PLANE_CURSOR][level]);
+		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
+				   &wm->wm[level]);
 	}
-	I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
 
 	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-			    &wm->ddb.plane[pipe][PLANE_CURSOR]);
+			    &ddb->plane[pipe][PLANE_CURSOR]);
 }
 
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
-			       const struct skl_ddb_allocation *new,
-			       enum pipe pipe)
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+			 const struct skl_wm_level *l2)
 {
-	return new->pipe[pipe].start == old->pipe[pipe].start &&
-	       new->pipe[pipe].end == old->pipe[pipe].end;
+	if (l1->plane_en != l2->plane_en)
+		return false;
+
+	/* If the level is disabled on both planes, the rest doesn't matter */
+	if (!l1->plane_en)
+		return true;
+
+	return (l1->plane_res_l == l2->plane_res_l &&
+		l1->plane_res_b == l2->plane_res_b);
 }
 
 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -3906,22 +3907,22 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
 }
 
 bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
-				 const struct skl_ddb_allocation *old,
-				 const struct skl_ddb_allocation *new,
-				 enum pipe pipe)
+				 struct intel_crtc *intel_crtc)
 {
-	struct drm_device *dev = state->dev;
-	struct intel_crtc *intel_crtc;
-	enum pipe otherp;
+	struct drm_crtc *other_crtc;
+	struct drm_crtc_state *other_cstate;
+	struct intel_crtc *other_intel_crtc;
+	const struct skl_ddb_entry *ddb =
+		&to_intel_crtc_state(intel_crtc->base.state)->wm.skl.ddb;
+	int i;
 
-	for_each_intel_crtc(dev, intel_crtc) {
-		otherp = intel_crtc->pipe;
+	for_each_crtc_in_state(state, other_crtc, other_cstate, i) {
+		other_intel_crtc = to_intel_crtc(other_crtc);
 
-		if (otherp == pipe)
+		if (other_intel_crtc == intel_crtc)
 			continue;
 
-		if (skl_ddb_entries_overlap(&new->pipe[pipe],
-					    &old->pipe[otherp]))
+		if (skl_ddb_entries_overlap(ddb, &other_intel_crtc->hw_ddb))
 			return true;
 	}
 
@@ -3929,11 +3930,11 @@ bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
 }
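skl_ddb_allocation_overlaps() now rejects only real conflicts: this pipe's newly computed DDB range against the ranges other pipes still have live in hardware (hw_ddb). The interval test itself is not shown in this hunk; a hedged sketch, assuming the usual half-open overlap check:

	#include <stdbool.h>
	#include <stdint.h>

	struct ddb_entry { uint16_t start, end; };	/* [start, end) in blocks */

	/* Two half-open ranges overlap iff each starts before the other ends. */
	static bool ddb_entries_overlap(const struct ddb_entry *a,
					const struct ddb_entry *b)
	{
		return a->start < b->end && b->start < a->end;
	}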
 
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
-			      struct skl_ddb_allocation *ddb, /* out */
+			      const struct skl_pipe_wm *old_pipe_wm,
 			      struct skl_pipe_wm *pipe_wm, /* out */
+			      struct skl_ddb_allocation *ddb, /* out */
 			      bool *changed /* out */)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
 	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
 	int ret;
 
@@ -3941,7 +3942,7 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
 	if (ret)
 		return ret;
 
-	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
+	if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
 		*changed = false;
 	else
 		*changed = true;
@@ -3962,7 +3963,7 @@ pipes_modified(struct drm_atomic_state *state)
 	return ret;
 }
 
-int
+static int
 skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 {
 	struct drm_atomic_state *state = cstate->base.state;
@@ -3980,7 +3981,7 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 
 	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
 
-	drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
 		id = skl_wm_plane_id(to_intel_plane(plane));
 
 		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
@@ -4050,6 +4051,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
 		intel_state->wm_results.dirty_pipes = ~0;
 	}
 
+	/*
+	 * We're not recomputing for the pipes not included in the commit, so
+	 * make sure we start with the current state.
+	 */
+	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+
 	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
 		struct intel_crtc_state *cstate;
 
@@ -4074,19 +4081,50 @@ skl_copy_wm_for_pipe(struct skl_wm_values *dst,
 		     struct skl_wm_values *src,
 		     enum pipe pipe)
 {
-	dst->wm_linetime[pipe] = src->wm_linetime[pipe];
-	memcpy(dst->plane[pipe], src->plane[pipe],
-	       sizeof(dst->plane[pipe]));
-	memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
-	       sizeof(dst->plane_trans[pipe]));
-
-	dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
 	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
 	       sizeof(dst->ddb.y_plane[pipe]));
 	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
 	       sizeof(dst->ddb.plane[pipe]));
 }
 
+static void
+skl_print_wm_changes(const struct drm_atomic_state *state)
+{
+	const struct drm_device *dev = state->dev;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct intel_atomic_state *intel_state =
+		to_intel_atomic_state(state);
+	const struct drm_crtc *crtc;
+	const struct drm_crtc_state *cstate;
+	const struct intel_plane *intel_plane;
+	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
+	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	int id;
+	int i;
+
+	for_each_crtc_in_state(state, crtc, cstate, i) {
+		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		enum pipe pipe = intel_crtc->pipe;
+
+		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+			const struct skl_ddb_entry *old, *new;
+
+			id = skl_wm_plane_id(intel_plane);
+			old = &old_ddb->plane[pipe][id];
+			new = &new_ddb->plane[pipe][id];
+
+			if (skl_ddb_entry_equal(old, new))
+				continue;
+
+			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+					 intel_plane->base.base.id,
+					 intel_plane->base.name,
+					 old->start, old->end,
+					 new->start, new->end);
+		}
+	}
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -4129,13 +4167,14 @@ skl_compute_wm(struct drm_atomic_state *state)
 	 * no suitable watermark values can be found.
 	 */
 	for_each_crtc_in_state(state, crtc, cstate, i) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		struct intel_crtc_state *intel_cstate =
 			to_intel_crtc_state(cstate);
+		const struct skl_pipe_wm *old_pipe_wm =
+			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
 
 		pipe_wm = &intel_cstate->wm.skl.optimal;
-		ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
-					 &changed);
+		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
+					 &results->ddb, &changed);
 		if (ret)
 			return ret;
 
@@ -4147,28 +4186,26 @@ skl_compute_wm(struct drm_atomic_state *state)
 			continue;
 
 		intel_cstate->update_wm_pre = true;
-		skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
 	}
 
+	skl_print_wm_changes(state);
+
 	return 0;
 }
 
-static void skl_update_wm(struct drm_crtc *crtc)
+static void skl_update_wm(struct intel_crtc *intel_crtc)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
 	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
-	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+	struct intel_crtc_state *cstate = to_intel_crtc_state(intel_crtc->base.state);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
 	enum pipe pipe = intel_crtc->pipe;
 
-	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
 		return;
 
-	intel_crtc->wm.active.skl = *pipe_wm;
-
 	mutex_lock(&dev_priv->wm.wm_mutex);
 
 	/*
@@ -4177,17 +4214,21 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	 * the pipe's shut off, just do so here. Already active pipes will have
 	 * their watermarks updated once we update their planes.
 	 */
-	if (crtc->state->active_changed) {
+	if (intel_crtc->base.state->active_changed) {
 		int plane;
 
-		for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
-			skl_write_plane_wm(intel_crtc, results, plane);
+		for_each_universal_plane(dev_priv, pipe, plane)
+			skl_write_plane_wm(intel_crtc, &pipe_wm->planes[plane],
+					   &results->ddb, plane);
 
-		skl_write_cursor_wm(intel_crtc, results);
+		skl_write_cursor_wm(intel_crtc, &pipe_wm->planes[PLANE_CURSOR],
+				    &results->ddb);
 	}
 
 	skl_copy_wm_for_pipe(hw_vals, results, pipe);
 
+	intel_crtc->hw_ddb = cstate->wm.skl.ddb;
+
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
@@ -4266,114 +4307,75 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
-static void skl_pipe_wm_active_state(uint32_t val,
-				     struct skl_pipe_wm *active,
-				     bool is_transwm,
-				     bool is_cursor,
-				     int i,
-				     int level)
+static inline void skl_wm_level_from_reg_val(uint32_t val,
+					     struct skl_wm_level *level)
 {
-	bool is_enabled = (val & PLANE_WM_EN) != 0;
-
-	if (!is_transwm) {
-		if (!is_cursor) {
-			active->wm[level].plane_en[i] = is_enabled;
-			active->wm[level].plane_res_b[i] =
-					val & PLANE_WM_BLOCKS_MASK;
-			active->wm[level].plane_res_l[i] =
-					(val >> PLANE_WM_LINES_SHIFT) &
-						PLANE_WM_LINES_MASK;
-		} else {
-			active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
-			active->wm[level].plane_res_b[PLANE_CURSOR] =
-					val & PLANE_WM_BLOCKS_MASK;
-			active->wm[level].plane_res_l[PLANE_CURSOR] =
-					(val >> PLANE_WM_LINES_SHIFT) &
-						PLANE_WM_LINES_MASK;
-		}
-	} else {
-		if (!is_cursor) {
-			active->trans_wm.plane_en[i] = is_enabled;
-			active->trans_wm.plane_res_b[i] =
-					val & PLANE_WM_BLOCKS_MASK;
-			active->trans_wm.plane_res_l[i] =
-					(val >> PLANE_WM_LINES_SHIFT) &
-						PLANE_WM_LINES_MASK;
-		} else {
-			active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
-			active->trans_wm.plane_res_b[PLANE_CURSOR] =
-					val & PLANE_WM_BLOCKS_MASK;
-			active->trans_wm.plane_res_l[PLANE_CURSOR] =
-					(val >> PLANE_WM_LINES_SHIFT) &
-						PLANE_WM_LINES_MASK;
-		}
-	}
+	level->plane_en = val & PLANE_WM_EN;
+	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
+	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
+		PLANE_WM_LINES_MASK;
 }
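skl_wm_level_from_reg_val() is the inverse of skl_write_wm_level() further up: one packs a watermark level into a PLANE_WM register value, the other recovers it. A self-contained sketch of the round trip (bit positions taken from the code above; the mask widths are assumptions of this illustration, not authoritative register definitions):

	#include <stdint.h>

	#define PLANE_WM_EN		(1u << 31)	/* assumed bit position */
	#define PLANE_WM_LINES_SHIFT	14
	#define PLANE_WM_LINES_MASK	0x1f		/* assumed field width */
	#define PLANE_WM_BLOCKS_MASK	0x3ff		/* assumed field width */

	struct wm_level { int plane_en; uint16_t res_b; uint8_t res_l; };

	static uint32_t wm_pack(const struct wm_level *l)
	{
		if (!l->plane_en)
			return 0;	/* a disabled level is written as all zeroes */
		return PLANE_WM_EN | l->res_b |
		       ((uint32_t)l->res_l << PLANE_WM_LINES_SHIFT);
	}

	static void wm_unpack(uint32_t val, struct wm_level *l)
	{
		l->plane_en = !!(val & PLANE_WM_EN);
		l->res_b = val & PLANE_WM_BLOCKS_MASK;
		l->res_l = (val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK;
	}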
 
-static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+			      struct skl_pipe_wm *out)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-	struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
+	struct intel_plane *intel_plane;
+	struct skl_plane_wm *wm;
 	enum pipe pipe = intel_crtc->pipe;
-	int level, i, max_level;
-	uint32_t temp;
+	int level, id, max_level;
+	uint32_t val;
 
-	max_level = ilk_wm_max_level(dev);
+	max_level = ilk_wm_max_level(dev_priv);
 
-	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		id = skl_wm_plane_id(intel_plane);
+		wm = &out->planes[id];
 
-	for (level = 0; level <= max_level; level++) {
-		for (i = 0; i < intel_num_planes(intel_crtc); i++)
-			hw->plane[pipe][i][level] =
-					I915_READ(PLANE_WM(pipe, i, level));
-		hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
+		for (level = 0; level <= max_level; level++) {
+			if (id != PLANE_CURSOR)
+				val = I915_READ(PLANE_WM(pipe, id, level));
+			else
+				val = I915_READ(CUR_WM(pipe, level));
+
+			skl_wm_level_from_reg_val(val, &wm->wm[level]);
+		}
+
+		if (id != PLANE_CURSOR)
+			val = I915_READ(PLANE_WM_TRANS(pipe, id));
+		else
+			val = I915_READ(CUR_WM_TRANS(pipe));
+
+		skl_wm_level_from_reg_val(val, &wm->trans_wm);
 	}
 
-	for (i = 0; i < intel_num_planes(intel_crtc); i++)
-		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
-	hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
-
 	if (!intel_crtc->active)
 		return;
 
-	hw->dirty_pipes |= drm_crtc_mask(crtc);
-
-	active->linetime = hw->wm_linetime[pipe];
-
-	for (level = 0; level <= max_level; level++) {
-		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
-			temp = hw->plane[pipe][i][level];
-			skl_pipe_wm_active_state(temp, active, false,
-						false, i, level);
-		}
-		temp = hw->plane[pipe][PLANE_CURSOR][level];
-		skl_pipe_wm_active_state(temp, active, false, true, i, level);
-	}
-
-	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
-		temp = hw->plane_trans[pipe][i];
-		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
-	}
-
-	temp = hw->plane_trans[pipe][PLANE_CURSOR];
-	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
-
-	intel_crtc->wm.active.skl = *active;
+	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
 }
 
 void skl_wm_get_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct drm_crtc *crtc;
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *cstate;
 
 	skl_ddb_get_hw_state(dev_priv, ddb);
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		skl_pipe_wm_get_hw_state(crtc);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		intel_crtc = to_intel_crtc(crtc);
+		cstate = to_intel_crtc_state(crtc->state);
+
+		skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+
+		if (intel_crtc->active)
+			hw->dirty_pipes |= drm_crtc_mask(crtc);
+	}
 
 	if (dev_priv->active_crtcs) {
 		/* Fully recompute DDB on first atomic commit */
@@ -4400,7 +4402,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 	};
 
 	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
 	memset(active, 0, sizeof(*active));
@@ -4422,7 +4424,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
 		active->linetime = hw->wm_linetime[pipe];
 	} else {
-		int level, max_level = ilk_wm_max_level(dev);
+		int level, max_level = ilk_wm_max_level(dev_priv);
 
 		/*
 		 * For inactive pipes, all watermark levels
@@ -4534,11 +4536,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 			plane->wm.fifo_size = 63;
 			break;
 		case DRM_PLANE_TYPE_PRIMARY:
-			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+			plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0);
 			break;
 		case DRM_PLANE_TYPE_OVERLAY:
 			sprite = plane->plane;
-			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+			plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1);
 			break;
 		}
 	}
@@ -4608,10 +4610,10 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
 		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
 	}
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-	else if (IS_IVYBRIDGE(dev))
+	else if (IS_IVYBRIDGE(dev_priv))
 		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
@@ -4651,9 +4653,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
  * to set the non-SR watermarks to 8.
  */
-void intel_update_watermarks(struct drm_crtc *crtc)
+void intel_update_watermarks(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (dev_priv->display.update_wm)
 		dev_priv->display.update_wm(crtc);
@@ -5355,6 +5357,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	uint32_t rc6_mask = 0;
 
 	/* 1a: Software RC state - RC0 */
@@ -5376,7 +5379,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	if (HAS_GUC(dev_priv))
@@ -5392,9 +5395,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
-	/* WaRsUseTimeoutMode */
-	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+	/* WaRsUseTimeoutMode:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
 		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
 			   GEN7_RC_CTL_TO_MODE |
@@ -5422,6 +5424,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	uint32_t rc6_mask = 0;
 
 	/* 1a: Software RC state - RC0 */
@@ -5438,7 +5441,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	if (IS_BROADWELL(dev_priv))
@@ -5498,6 +5501,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	u32 rc6vids, rc6_mask = 0;
 	u32 gtfifodbg;
 	int rc6_mode;
@@ -5531,7 +5535,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5568,10 +5572,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
-	if (ret)
-		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
-
 	reset_rps(dev_priv, gen6_set_rps);
 
 	rc6vids = 0;
@@ -5861,7 +5861,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
 	if (WARN_ON(!dev_priv->vlv_pctx))
 		return;
 
-	i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
+	i915_gem_object_put(dev_priv->vlv_pctx);
 	dev_priv->vlv_pctx = NULL;
 }
 
@@ -5980,6 +5980,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6006,7 +6007,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 
@@ -6068,6 +6069,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	u32 gtfifodbg, val, rc6_mode = 0;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6107,7 +6109,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv, id)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -6790,7 +6792,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
 	if (READ_ONCE(dev_priv->rps.enabled))
 		goto out;
 
-	rcs = &dev_priv->engine[RCS];
+	rcs = dev_priv->engine[RCS];
 	if (rcs->last_context)
 		goto out;
 
@@ -6843,10 +6845,8 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void ibx_init_clock_gating(struct drm_device *dev)
+static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/*
 	 * On Ibex Peak and Cougar Point, we need to disable clock
 	 * gating for the panel power sequencer or it will fail to
@@ -6855,9 +6855,8 @@ static void ibx_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
 }
 
-static void g4x_disable_trickle_feed(struct drm_device *dev)
+static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
@@ -6870,10 +6869,8 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
 	}
 }
 
-static void ilk_init_lp_watermarks(struct drm_device *dev)
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
 	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
 	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6884,9 +6881,8 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
 	 */
 }
 
-static void ironlake_init_clock_gating(struct drm_device *dev)
+static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
 	/*
@@ -6918,7 +6914,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 		   (I915_READ(DISP_ARB_CTL) |
 		    DISP_FBC_WM_DIS));
 
-	ilk_init_lp_watermarks(dev);
+	ilk_init_lp_watermarks(dev_priv);
 
 	/*
 	 * Based on the document from hardware guys the following bits
@@ -6927,7 +6923,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	 * The bit 22 of 0x42004
 	 * The bit 7,8,9 of 0x42020.
 	 */
-	if (IS_IRONLAKE_M(dev)) {
+	if (IS_IRONLAKE_M(dev_priv)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -6953,14 +6949,13 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	/* WaDisable_RenderCache_OperationalFlush:ilk */
 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-	g4x_disable_trickle_feed(dev);
+	g4x_disable_trickle_feed(dev_priv);
 
-	ibx_init_clock_gating(dev);
+	ibx_init_clock_gating(dev_priv);
 }
 
-static void cpt_init_clock_gating(struct drm_device *dev)
+static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;
 	uint32_t val;
 
@@ -6995,9 +6990,8 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 	}
 }
 
-static void gen6_check_mch_setup(struct drm_device *dev)
+static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp;
 
 	tmp = I915_READ(MCH_SSKPD);
@@ -7006,9 +7000,8 @@ static void gen6_check_mch_setup(struct drm_device *dev)
 			      tmp);
 }
 
-static void gen6_init_clock_gating(struct drm_device *dev)
+static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -7035,7 +7028,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN6_GT_MODE,
 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
-	ilk_init_lp_watermarks(dev);
+	ilk_init_lp_watermarks(dev_priv);
 
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -7096,11 +7089,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
-	g4x_disable_trickle_feed(dev);
+	g4x_disable_trickle_feed(dev_priv);
 
-	cpt_init_clock_gating(dev);
+	cpt_init_clock_gating(dev_priv);
 
-	gen6_check_mch_setup(dev);
+	gen6_check_mch_setup(dev_priv);
 }
 
 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -7121,15 +7114,13 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
 }
 
-static void lpt_init_clock_gating(struct drm_device *dev)
+static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/*
 	 * TODO: this bit should only be enabled when really needed, then
 	 * disabled when not needed anymore in order to save power.
 	 */
-	if (HAS_PCH_LPT_LP(dev))
+	if (HAS_PCH_LPT_LP(dev_priv))
 		I915_WRITE(SOUTH_DSPCLK_GATE_D,
 			   I915_READ(SOUTH_DSPCLK_GATE_D) |
 			   PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -7140,11 +7131,9 @@ static void lpt_init_clock_gating(struct drm_device *dev)
 		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
 }
 
-static void lpt_suspend_hw(struct drm_device *dev)
+static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	if (HAS_PCH_LPT_LP(dev)) {
+	if (HAS_PCH_LPT_LP(dev_priv)) {
 		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
 
 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -7175,11 +7164,9 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 }
 
-static void kabylake_init_clock_gating(struct drm_device *dev)
+static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	gen9_init_clock_gating(dev);
+	gen9_init_clock_gating(dev_priv);
 
 	/* WaDisableSDEUnitClockGating:kbl */
 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
@@ -7196,11 +7183,9 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
 }
 
-static void skylake_init_clock_gating(struct drm_device *dev)
+static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	gen9_init_clock_gating(dev);
+	gen9_init_clock_gating(dev_priv);
 
 	/* WAC6entrylatency:skl */
 	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
@@ -7211,12 +7196,11 @@ static void skylake_init_clock_gating(struct drm_device *dev)
 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
 }
 
-static void broadwell_init_clock_gating(struct drm_device *dev)
+static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
-	ilk_init_lp_watermarks(dev);
+	ilk_init_lp_watermarks(dev_priv);
 
 	/* WaSwitchSolVfFArbitrationPriority:bdw */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7259,14 +7243,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
 		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
 
-	lpt_init_clock_gating(dev);
+	lpt_init_clock_gating(dev_priv);
 }
 
-static void haswell_init_clock_gating(struct drm_device *dev)
+static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	ilk_init_lp_watermarks(dev);
+	ilk_init_lp_watermarks(dev_priv);
 
 	/* L3 caching of data atomics doesn't work -- disable it. */
 	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
@@ -7315,15 +7297,14 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CHICKEN_PAR1_1,
 		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
 
-	lpt_init_clock_gating(dev);
+	lpt_init_clock_gating(dev_priv);
 }
 
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t snpcr;
 
-	ilk_init_lp_watermarks(dev);
+	ilk_init_lp_watermarks(dev_priv);
 
 	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -7337,7 +7318,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
 	/* WaDisablePSDDualDispatchEnable:ivb */
-	if (IS_IVB_GT1(dev))
+	if (IS_IVB_GT1(dev_priv))
 		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
@@ -7353,7 +7334,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 			GEN7_WA_FOR_GEN7_L3_CONTROL);
 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
 		   GEN7_WA_L3_CHICKEN_MODE);
-	if (IS_IVB_GT1(dev))
+	if (IS_IVB_GT1(dev_priv))
 		I915_WRITE(GEN7_ROW_CHICKEN2,
 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 	else {
@@ -7380,7 +7361,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-	g4x_disable_trickle_feed(dev);
+	g4x_disable_trickle_feed(dev_priv);
 
 	gen7_setup_fixed_func_scheduler(dev_priv);
 
@@ -7410,16 +7391,14 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 	snpcr |= GEN6_MBC_SNPCR_MED;
 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
-	if (!HAS_PCH_NOP(dev))
-		cpt_init_clock_gating(dev);
+	if (!HAS_PCH_NOP(dev_priv))
+		cpt_init_clock_gating(dev_priv);
 
-	gen6_check_mch_setup(dev);
+	gen6_check_mch_setup(dev_priv);
 }
 
-static void valleyview_init_clock_gating(struct drm_device *dev)
+static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/* WaDisableEarlyCull:vlv */
 	I915_WRITE(_3D_CHICKEN3,
 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -7498,10 +7477,8 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 }
 
-static void cherryview_init_clock_gating(struct drm_device *dev)
+static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/* WaVSRefCountFullforceMissDisable:chv */
 	/* WaDSRefCountFullforceMissDisable:chv */
 	I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -7534,9 +7511,8 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
 }
 
-static void g4x_init_clock_gating(struct drm_device *dev)
+static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate;
 
 	I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7547,7 +7523,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
 		OVRUNIT_CLOCK_GATE_DISABLE |
 		OVCUNIT_CLOCK_GATE_DISABLE;
-	if (IS_GM45(dev))
+	if (IS_GM45(dev_priv))
 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
 	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
 
@@ -7558,13 +7534,11 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 	/* WaDisable_RenderCache_OperationalFlush:g4x */
 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-	g4x_disable_trickle_feed(dev);
+	g4x_disable_trickle_feed(dev_priv);
 }
 
-static void crestline_init_clock_gating(struct drm_device *dev)
+static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
 	I915_WRITE(RENCLK_GATE_D2, 0);
 	I915_WRITE(DSPCLK_GATE_D, 0);
@@ -7577,10 +7551,8 @@ static void crestline_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
-static void broadwater_init_clock_gating(struct drm_device *dev)
+static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
 		   I965_RCC_CLOCK_GATE_DISABLE |
 		   I965_RCPB_CLOCK_GATE_DISABLE |
@@ -7594,16 +7566,15 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
-static void gen3_init_clock_gating(struct drm_device *dev)
+static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dstate = I915_READ(D_STATE);
 
 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
 		DSTATE_DOT_CLOCK_GATING;
 	I915_WRITE(D_STATE, dstate);
 
-	if (IS_PINEVIEW(dev))
+	if (IS_PINEVIEW(dev_priv))
 		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
 
 	/* IIR "flip pending" means done if this bit is set */
@@ -7619,10 +7590,8 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
 }
 
-static void i85x_init_clock_gating(struct drm_device *dev)
+static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 
 	/* interrupts should cause a wake up from C3 */
@@ -7633,10 +7602,8 @@ static void i85x_init_clock_gating(struct drm_device *dev)
 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
 }
 
-static void i830_init_clock_gating(struct drm_device *dev)
+static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 
 	I915_WRITE(MEM_MODE,
@@ -7644,20 +7611,18 @@ static void i830_init_clock_gating(struct drm_device *dev)
 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
 }
 
-void intel_init_clock_gating(struct drm_device *dev)
+void intel_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	dev_priv->display.init_clock_gating(dev);
+	dev_priv->display.init_clock_gating(dev_priv);
 }
 
-void intel_suspend_hw(struct drm_device *dev)
+void intel_suspend_hw(struct drm_i915_private *dev_priv)
 {
-	if (HAS_PCH_LPT(dev))
-		lpt_suspend_hw(dev);
+	if (HAS_PCH_LPT(dev_priv))
+		lpt_suspend_hw(dev_priv);
 }
 
-static void nop_init_clock_gating(struct drm_device *dev)
+static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
 }
@@ -7712,29 +7677,27 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 }
 
 /* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_device *dev)
+void intel_init_pm(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	intel_fbc_init(dev_priv);
 
 	/* For cxsr */
-	if (IS_PINEVIEW(dev))
-		i915_pineview_get_mem_freq(dev);
-	else if (IS_GEN5(dev))
-		i915_ironlake_get_mem_freq(dev);
+	if (IS_PINEVIEW(dev_priv))
+		i915_pineview_get_mem_freq(dev_priv);
+	else if (IS_GEN5(dev_priv))
+		i915_ironlake_get_mem_freq(dev_priv);
 
 	/* For FIFO watermark updates */
-	if (INTEL_INFO(dev)->gen >= 9) {
-		skl_setup_wm_latency(dev);
+	if (INTEL_GEN(dev_priv) >= 9) {
+		skl_setup_wm_latency(dev_priv);
 		dev_priv->display.update_wm = skl_update_wm;
 		dev_priv->display.compute_global_watermarks = skl_compute_wm;
-	} else if (HAS_PCH_SPLIT(dev)) {
-		ilk_setup_wm_latency(dev);
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
+		ilk_setup_wm_latency(dev_priv);
 
-		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
 		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
-		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
 		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
 			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
 			dev_priv->display.compute_intermediate_wm =
@@ -7747,14 +7710,14 @@ void intel_init_pm(struct drm_device *dev)
 			DRM_DEBUG_KMS("Failed to read display plane latency. "
 				      "Disable CxSR\n");
 		}
-	} else if (IS_CHERRYVIEW(dev)) {
-		vlv_setup_wm_latency(dev);
+	} else if (IS_CHERRYVIEW(dev_priv)) {
+		vlv_setup_wm_latency(dev_priv);
 		dev_priv->display.update_wm = vlv_update_wm;
-	} else if (IS_VALLEYVIEW(dev)) {
-		vlv_setup_wm_latency(dev);
+	} else if (IS_VALLEYVIEW(dev_priv)) {
+		vlv_setup_wm_latency(dev_priv);
 		dev_priv->display.update_wm = vlv_update_wm;
-	} else if (IS_PINEVIEW(dev)) {
-		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+	} else if (IS_PINEVIEW(dev_priv)) {
+		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
 					    dev_priv->is_ddr3,
 					    dev_priv->fsb_freq,
 					    dev_priv->mem_freq)) {
@@ -7768,15 +7731,15 @@ void intel_init_pm(struct drm_device *dev)
 			dev_priv->display.update_wm = NULL;
 		} else
 			dev_priv->display.update_wm = pineview_update_wm;
-	} else if (IS_G4X(dev)) {
+	} else if (IS_G4X(dev_priv)) {
 		dev_priv->display.update_wm = g4x_update_wm;
-	} else if (IS_GEN4(dev)) {
+	} else if (IS_GEN4(dev_priv)) {
 		dev_priv->display.update_wm = i965_update_wm;
-	} else if (IS_GEN3(dev)) {
+	} else if (IS_GEN3(dev_priv)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
-	} else if (IS_GEN2(dev)) {
-		if (INTEL_INFO(dev)->num_pipes == 1) {
+	} else if (IS_GEN2(dev_priv)) {
+		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
 			dev_priv->display.update_wm = i845_update_wm;
 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
 		} else {
@@ -8024,5 +7987,4 @@ void intel_pm_setup(struct drm_device *dev)
 
 	dev_priv->pm.suspended = false;
 	atomic_set(&dev_priv->pm.wakeref_count, 0);
-	atomic_set(&dev_priv->pm.atomic_seq, 0);
 }
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 108ba1e..271a3e2 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -268,7 +268,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
 	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
 
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev_priv))
 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
 	if (dev_priv->psr.link_standby)
@@ -344,7 +344,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 	 * ones. Since by display design the eDP transcoder is tied to port A,
 	 * we can safely escape based on port A.
 	 */
-	if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+	if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
 		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
 		return false;
 	}
@@ -354,20 +354,20 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 		return false;
 	}
 
-	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	    !dev_priv->psr.link_standby) {
 		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
 		return false;
 	}
 
-	if (IS_HASWELL(dev) &&
+	if (IS_HASWELL(dev_priv) &&
 	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
 		      S3D_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
 		return false;
 	}
 
-	if (IS_HASWELL(dev) &&
+	if (IS_HASWELL(dev_priv) &&
 	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
 		return false;
@@ -402,7 +402,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	lockdep_assert_held(&dev_priv->psr.lock);
 
 	/* Enable/Re-enable PSR on the host */
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		/* On HSW+, after we enable PSR on the source it will activate
 		 * as soon as the configured idle_frame count is matched, so
 		 * we just enable it here at activation time.
@@ -448,7 +448,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 
-	if (HAS_DDI(dev)) {
+	if (HAS_DDI(dev_priv)) {
 		hsw_psr_setup_vsc(intel_dp);
 
 		if (dev_priv->psr.psr2_support) {
@@ -580,7 +580,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
 	}
 
 	/* Disable PSR on Source */
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		hsw_psr_disable(intel_dp);
 	else
 		vlv_psr_disable(intel_dp);
@@ -827,17 +827,17 @@ void intel_psr_init(struct drm_device *dev)
 
 	/* Per platform default */
 	if (i915.enable_psr == -1) {
-		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 			i915.enable_psr = 1;
 		else
 			i915.enable_psr = 0;
 	}
 
 	/* Set link_standby x link_off defaults */
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		/* HSW and BDW require workarounds that we don't implement. */
 		dev_priv->psr.link_standby = false;
-	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		/* On VLV and CHV only standby mode is supported. */
 		dev_priv->psr.link_standby = true;
 	else
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ed9955d..700e93d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -405,22 +405,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	u64 acthd;
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
-					 RING_ACTHD_UDW(engine->mmio_base));
-	else if (INTEL_GEN(dev_priv) >= 4)
-		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
-	else
-		acthd = I915_READ(ACTHD);
-
-	return acthd;
-}
-
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -585,9 +569,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	I915_WRITE_TAIL(engine, ring->tail);
 	(void)I915_READ_TAIL(engine);
 
-	I915_WRITE_CTL(engine,
-			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_VALID);
+	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
 	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
@@ -666,7 +648,7 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_render_state_init(req);
+	ret = i915_gem_render_state_emit(req);
 	if (ret)
 		return ret;
 
@@ -851,15 +833,13 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
-	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
-	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+	/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 				  GEN9_DG_MIRROR_FIX_ENABLE);
 
-	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
 		/*
@@ -869,10 +849,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 		 */
 	}
 
-	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
 	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
 	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-			  GEN9_ENABLE_YV12_BUGFIX |
 			  GEN9_ENABLE_GPGPU_PREEMPTION);
 
 	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
@@ -884,9 +862,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
 
-	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
-	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+	/* WaDisableMaskBasedCammingInRCC:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
 				  PIXEL_MASK_CAMMING_DISABLE);
 
@@ -1003,47 +980,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 	 * until D0 which is the default case so this is equivalent to
 	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
 	 */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
-		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-	}
-
-	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
-		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
-		I915_WRITE(FF_SLICE_CS_CHICKEN2,
-			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
-	}
-
-	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
-	 * involving this register should also be added to WA batch as required.
-	 */
-	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
-		/* WaDisableLSQCROPERFforOCL:skl */
-		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-			   GEN8_LQSC_RO_PERF_DIS);
+	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
-		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
-					   GEN9_GAPS_TSV_CREDIT_DISABLE));
-	}
-
-	/* WaDisablePowerCompilerClockGating:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
-		WA_SET_BIT_MASKED(HIZ_CHICKEN,
-				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
-
-	/* WaBarrierPerformanceFixDisable:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE |
-				  HDC_BARRIER_PERFORMANCE_DISABLE);
-
-	/* WaDisableSbeCacheDispatchPortSharing:skl */
-	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
-		WA_SET_BIT_MASKED(
-			GEN7_HALF_SLICE_CHICKEN1,
-			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+				   GEN9_GAPS_TSV_CREDIT_DISABLE));
 
 	/* WaDisableGafsUnitClkGating:skl */
 	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
@@ -1271,91 +1213,64 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 	i915_vma_unpin_and_release(&dev_priv->semaphore);
 }
 
-static int gen8_rcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
-	int ret, num_rings;
 
-	num_rings = INTEL_INFO(dev_priv)->num_rings;
-	ret = intel_ring_begin(req, (num_rings-1) * 8);
-	if (ret)
-		return ret;
-
-	for_each_engine_id(waiter, dev_priv, id) {
+	for_each_engine(waiter, dev_priv, id) {
 		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring,
-				PIPE_CONTROL_GLOBAL_GTT_IVB |
-				PIPE_CONTROL_QW_WRITE |
-				PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(ring, lower_32_bits(gtt_offset));
-		intel_ring_emit(ring, upper_32_bits(gtt_offset));
-		intel_ring_emit(ring, req->fence.seqno);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring,
-				MI_SEMAPHORE_SIGNAL |
-				MI_SEMAPHORE_TARGET(waiter->hw_id));
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+			  PIPE_CONTROL_QW_WRITE |
+			  PIPE_CONTROL_CS_STALL);
+		*out++ = lower_32_bits(gtt_offset);
+		*out++ = upper_32_bits(gtt_offset);
+		*out++ = req->global_seqno;
+		*out++ = 0;
+		*out++ = (MI_SEMAPHORE_SIGNAL |
+			  MI_SEMAPHORE_TARGET(waiter->hw_id));
+		*out++ = 0;
 	}
-	intel_ring_advance(ring);
 
-	return 0;
+	return out;
 }
 
-static int gen8_xcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
-	int ret, num_rings;
 
-	num_rings = INTEL_INFO(dev_priv)->num_rings;
-	ret = intel_ring_begin(req, (num_rings-1) * 6);
-	if (ret)
-		return ret;
-
-	for_each_engine_id(waiter, dev_priv, id) {
+	for_each_engine(waiter, dev_priv, id) {
 		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(ring,
-				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-		intel_ring_emit(ring,
-				lower_32_bits(gtt_offset) |
-				MI_FLUSH_DW_USE_GTT);
-		intel_ring_emit(ring, upper_32_bits(gtt_offset));
-		intel_ring_emit(ring, req->fence.seqno);
-		intel_ring_emit(ring,
-				MI_SEMAPHORE_SIGNAL |
-				MI_SEMAPHORE_TARGET(waiter->hw_id));
-		intel_ring_emit(ring, 0);
+		*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+		*out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+		*out++ = upper_32_bits(gtt_offset);
+		*out++ = req->global_seqno;
+		*out++ = (MI_SEMAPHORE_SIGNAL |
+			  MI_SEMAPHORE_TARGET(waiter->hw_id));
+		*out++ = 0;
 	}
-	intel_ring_advance(ring);
 
-	return 0;
+	return out;
 }
 
-static int gen6_signal(struct drm_i915_gem_request *req)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine;
-	int ret, num_rings;
+	enum intel_engine_id id;
+	int num_rings = 0;
 
-	num_rings = INTEL_INFO(dev_priv)->num_rings;
-	ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
-	if (ret)
-		return ret;
-
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		i915_reg_t mbox_reg;
 
 		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
@@ -1363,101 +1278,78 @@ static int gen6_signal(struct drm_i915_gem_request *req)
 
 		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
 		if (i915_mmio_reg_valid(mbox_reg)) {
-			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-			intel_ring_emit_reg(ring, mbox_reg);
-			intel_ring_emit(ring, req->fence.seqno);
+			*out++ = MI_LOAD_REGISTER_IMM(1);
+			*out++ = i915_mmio_reg_offset(mbox_reg);
+			*out++ = req->global_seqno;
+			num_rings++;
 		}
 	}
+	if (num_rings & 1)
+		*out++ = MI_NOOP;
 
-	/* If num_dwords was rounded, make sure the tail pointer is correct */
-	if (num_rings % 2 == 0)
-		intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
-
-	return 0;
+	return out;
 }
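gen6_signal() pads with a single MI_NOOP whenever it has written an odd number of dwords, because the ring is consumed in qword (two-dword) units. The invariant as a standalone sketch, assuming MI_NOOP encodes as 0:

	#include <stdint.h>

	#define MI_NOOP 0u	/* assumed encoding, for illustration only */

	/* Pad an emitted dword run out to an even (qword-aligned) length. */
	static uint32_t *pad_to_qword(const uint32_t *start, uint32_t *out)
	{
		if ((out - start) & 1)
			*out++ = MI_NOOP;
		return out;
	}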
 
 static void i9xx_submit_request(struct drm_i915_gem_request *request)
 {
 	struct drm_i915_private *dev_priv = request->i915;
 
-	I915_WRITE_TAIL(request->engine,
-			intel_ring_offset(request->ring, request->tail));
+	I915_WRITE_TAIL(request->engine, request->tail);
 }
 
-static int i9xx_emit_request(struct drm_i915_gem_request *req)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
+				 u32 *out)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	*out++ = MI_STORE_DWORD_INDEX;
+	*out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+	*out++ = req->global_seqno;
+	*out++ = MI_USER_INTERRUPT;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, req->fence.seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
-
-	req->tail = ring->tail;
-
-	return 0;
+	req->tail = intel_ring_offset(req->ring, out);
 }
 
+static const int i9xx_emit_breadcrumb_sz = 4;
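The reworked breadcrumb path reserves a fixed dword budget up front (i9xx_emit_breadcrumb_sz) and hands the emitter a raw pointer to fill, replacing the old intel_ring_begin()/intel_ring_emit() sequence. A sketch of that contract with placeholder opcodes (the real MI_* encodings are deliberately not reproduced):

	#include <assert.h>
	#include <stdint.h>

	#define BREADCRUMB_SZ 4		/* dwords the caller reserves */

	/* Hypothetical emitter: must fill exactly BREADCRUMB_SZ dwords. */
	static void emit_breadcrumb(uint32_t seqno, uint32_t *out)
	{
		uint32_t *start = out;

		*out++ = 0xA0;		/* placeholder: store-dword command */
		*out++ = 0x40;		/* placeholder: status-page slot */
		*out++ = seqno;
		*out++ = 0xB0;		/* placeholder: user-interrupt command */

		assert(out - start == BREADCRUMB_SZ);
	}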
+
 /**
- * gen6_sema_emit_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
  *
 	 * @req: request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
+				      u32 *out)
 {
-	int ret;
-
-	ret = req->engine->semaphore.signal(req);
-	if (ret)
-		return ret;
-
-	return i9xx_emit_request(req);
+	return i9xx_emit_breadcrumb(req,
+				    req->engine->semaphore.signal(req, out));
 }
 
-static int gen8_render_emit_request(struct drm_i915_gem_request *req)
+static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
+					u32 *out)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ring *ring = req->ring;
-	int ret;
 
-	if (engine->semaphore.signal) {
-		ret = engine->semaphore.signal(req);
-		if (ret)
-			return ret;
-	}
+	if (engine->semaphore.signal)
+		out = engine->semaphore.signal(req, out);
 
-	ret = intel_ring_begin(req, 8);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
 			       PIPE_CONTROL_CS_STALL |
-			       PIPE_CONTROL_QW_WRITE));
-	intel_ring_emit(ring, intel_hws_seqno_address(engine));
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+			       PIPE_CONTROL_QW_WRITE);
+	*out++ = intel_hws_seqno_address(engine);
+	*out++ = 0;
+	*out++ = req->global_seqno;
 	/* We're thrashing one dword of HWS. */
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = 0;
+	*out++ = MI_USER_INTERRUPT;
+	*out++ = MI_NOOP;
 
-	req->tail = ring->tail;
-
-	return 0;
+	req->tail = intel_ring_offset(req->ring, out);
 }
 
+static const int gen8_render_emit_breadcrumb_sz = 8;
+
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -1484,7 +1376,7 @@ gen8_ring_sync_to(struct drm_i915_gem_request *req,
 			MI_SEMAPHORE_WAIT |
 			MI_SEMAPHORE_GLOBAL_GTT |
 			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->fence.seqno);
+	intel_ring_emit(ring, signal->global_seqno);
 	intel_ring_emit(ring, lower_32_bits(offset));
 	intel_ring_emit(ring, upper_32_bits(offset));
 	intel_ring_advance(ring);
@@ -1522,7 +1414,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
 	 * seqno is >= the last seqno executed. However, for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->fence.seqno - 1);
+	intel_ring_emit(ring, signal->global_seqno - 1);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
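 
 	/*
 	 * Worked example of the off-by-one above (illustrative numbers): to
 	 * wait until seqno 100 has executed on hardware that only tests
 	 * "ring value > wait value", the wait is programmed with 99, which
 	 * first passes once the signaller has written 100, i.e. exactly the
 	 * ">= 100" that the software model requires.
 	 */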
@@ -1665,7 +1557,7 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 
 	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-	gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
+	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
@@ -1674,7 +1566,7 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 
 	I915_WRITE_IMR(engine, ~0);
-	gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
+	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
@@ -1819,14 +1711,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
 	struct i915_vma *vma;
+	struct drm_i915_gem_object *obj;
 
 	vma = fetch_and_zero(&engine->status_page.vma);
 	if (!vma)
 		return;
 
+	obj = vma->obj;
+
 	i915_vma_unpin(vma);
-	i915_gem_object_unpin_map(vma->obj);
-	i915_vma_put(vma);
+	i915_vma_close(vma);
+
+	i915_gem_object_unpin_map(obj);
+	__i915_gem_object_release_unless_active(obj);
 }
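 
 /*
  * The teardown order above appears deliberate (a reading of this hunk, not
  * separately documented): vma->obj is sampled before i915_vma_close(),
  * which may release the vma, and the object is then dropped via
  * __i915_gem_object_release_unless_active() so it stays alive while the
  * GPU may still be reading the status page.
  */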
 
 static int init_status_page(struct intel_engine_cs *engine)
@@ -1834,9 +1731,10 @@ static int init_status_page(struct intel_engine_cs *engine)
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	unsigned int flags;
+	void *vaddr;
 	int ret;
 
-	obj = i915_gem_object_create(&engine->i915->drm, 4096);
+	obj = i915_gem_object_create_internal(engine->i915, 4096);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("Failed to allocate status page\n");
 		return PTR_ERR(obj);
@@ -1869,15 +1767,22 @@ static int init_status_page(struct intel_engine_cs *engine)
 	if (ret)
 		goto err;
 
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto err_unpin;
+	}
+
 	engine->status_page.vma = vma;
 	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
-	engine->status_page.page_addr =
-		i915_gem_object_pin_map(obj, I915_MAP_WB);
+	engine->status_page.page_addr = memset(vaddr, 0, 4096);
 
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			 engine->name, i915_ggtt_offset(vma));
 	return 0;
 
+err_unpin:
+	i915_vma_unpin(vma);
 err:
 	i915_gem_object_put(obj);
 	return ret;
@@ -1989,6 +1894,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!is_power_of_2(size));
+	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
@@ -2023,7 +1929,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 void
 intel_ring_free(struct intel_ring *ring)
 {
-	i915_vma_put(ring->vma);
+	struct drm_i915_gem_object *obj = ring->vma->obj;
+
+	i915_vma_close(ring->vma);
+	__i915_gem_object_release_unless_active(obj);
+
 	kfree(ring);
 }
 
@@ -2039,14 +1949,13 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
 		return 0;
 
 	if (ce->state) {
-		ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
-		if (ret)
-			goto error;
+		struct i915_vma *vma;
 
-		ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
-				   PIN_GLOBAL | PIN_HIGH);
-		if (ret)
+		vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
 			goto error;
+		}
 	}
 
 	/* The kernel context is only used as a placeholder for flushing the
@@ -2093,9 +2002,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_engine_setup_common(engine);
 
-	memset(engine->semaphore.sync_seqno, 0,
-	       sizeof(engine->semaphore.sync_seqno));
-
 	ret = intel_engine_init_common(engine);
 	if (ret)
 		goto error;
@@ -2146,9 +2052,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
 
-	if (!intel_engine_initialized(engine))
-		return;
-
 	dev_priv = engine->i915;
 
 	if (engine->buffer) {
@@ -2175,13 +2078,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 	intel_ring_context_unpin(dev_priv->kernel_context, engine);
 
 	engine->i915 = NULL;
+	dev_priv->engine[engine->id] = NULL;
+	kfree(engine);
 }
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine(engine, dev_priv, id) {
 		engine->buffer->head = engine->buffer->tail;
 		engine->buffer->last_retired_head = -1;
 	}
@@ -2211,7 +2117,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
 	struct intel_ring *ring = req->ring;
 	struct drm_i915_gem_request *target;
-	int ret;
+	long timeout;
+
+	lockdep_assert_held(&req->i915->drm.struct_mutex);
 
 	intel_ring_update_space(ring);
 	if (ring->space >= bytes)
@@ -2241,11 +2149,11 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
-	ret = i915_wait_request(target,
-				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
-				NULL, NO_WAITBOOST);
-	if (ret)
-		return ret;
+	timeout = i915_wait_request(target,
+				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				    MAX_SCHEDULE_TIMEOUT);
+	if (timeout < 0)
+		return timeout;
 
 	i915_gem_request_retire_upto(target);
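 
 	/*
 	 * Return convention assumed from the call above: i915_wait_request()
 	 * now returns a long, the remaining jiffies (>= 0) on success or a
 	 * negative errno on failure, so callers test the sign of the result
 	 * instead of a zero/non-zero int:
 	 *
 	 *	if (timeout < 0)
 	 *		return timeout;
 	 */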
 
@@ -2674,9 +2582,22 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	engine->init_hw = init_ring_common;
 	engine->reset_hw = reset_ring_common;
 
-	engine->emit_request = i9xx_emit_request;
-	if (i915.semaphores)
-		engine->emit_request = gen6_sema_emit_request;
+	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
+	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
+	if (i915.semaphores) {
+		int num_rings;
+
+		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
+
+		num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+		if (INTEL_GEN(dev_priv) >= 8) {
+			engine->emit_breadcrumb_sz += num_rings * 6;
+		} else {
+			engine->emit_breadcrumb_sz += num_rings * 3;
+			if (num_rings & 1)
+				engine->emit_breadcrumb_sz++;
+		}
+	}
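+
+	/*
+	 * Worked example for the sizing above (assuming a 4-engine part, so
+	 * num_rings == 3): gen6/7 semaphores add 3 dwords per remote ring
+	 * plus one MI_NOOP of padding for the odd count, 4 + 3*3 + 1 = 14
+	 * dwords in total; gen8 adds 6 dwords per ring, 4 + 3*6 = 22.
+	 */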
 	engine->submit_request = i9xx_submit_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
@@ -2703,10 +2624,18 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->emit_request = gen8_render_emit_request;
+		engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
+		engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
 		engine->emit_flush = gen8_render_ring_flush;
-		if (i915.semaphores)
+		if (i915.semaphores) {
+			int num_rings;
+
 			engine->semaphore.signal = gen8_rcs_signal;
+
+			num_rings =
+				hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+			engine->emit_breadcrumb_sz += num_rings * 6;
+		}
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
 		engine->emit_flush = gen7_render_ring_flush;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ec0b4a0..d1a7287 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -4,6 +4,7 @@
 #include <linux/hashtable.h>
 #include "i915_gem_batch_pool.h"
 #include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -73,13 +74,40 @@ enum intel_engine_hangcheck_action {
 
 #define HANGCHECK_SCORE_RING_HUNG 31
 
+#define I915_MAX_SLICES	3
+#define I915_MAX_SUBSLICES 3
+
+#define instdone_slice_mask(dev_priv__) \
+	(INTEL_GEN(dev_priv__) == 7 ? \
+	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+	(INTEL_GEN(dev_priv__) == 7 ? \
+	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+	for ((slice__) = 0, (subslice__) = 0; \
+	     (slice__) < I915_MAX_SLICES; \
+	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+	       (slice__) += ((subslice__) == 0)) \
+		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+
+struct intel_instdone {
+	u32 instdone;
+	/* The following exist only in the RCS engine */
+	u32 slice_common;
+	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
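+
+/*
+ * A usage sketch for the iterator and struct above (hypothetical caller,
+ * not from this patch): visit each slice/subslice pair present on the
+ * device and print its per-unit sampler INSTDONE value.
+ *
+ *	int slice, subslice;
+ *
+ *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ *		seq_printf(m, "sampler[%d][%d]: 0x%08x\n", slice, subslice,
+ *			   instdone->sampler[slice][subslice]);
+ */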
+
 struct intel_engine_hangcheck {
 	u64 acthd;
 	u32 seqno;
 	int score;
 	enum intel_engine_hangcheck_action action;
 	int deadlock;
-	u32 instdone[I915_NUM_INSTDONE_REG];
+	struct intel_instdone instdone;
 };
 
 struct intel_ring {
@@ -130,6 +158,7 @@ struct i915_ctx_workarounds {
 };
 
 struct drm_i915_gem_request;
+struct intel_render_state;
 
 struct intel_engine_cs {
 	struct drm_i915_private *i915;
@@ -141,7 +170,6 @@ struct intel_engine_cs {
 		VCS2,	/* Keep instances of the same type engine together. */
 		VECS
 	} id;
-#define I915_NUM_ENGINES 5
 #define _VCS(n) (VCS + (n))
 	unsigned int exec_id;
 	enum intel_engine_hw_id {
@@ -152,10 +180,12 @@ struct intel_engine_cs {
 		VCS2_HW
 	} hw_id;
 	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
-	u64 fence_context;
 	u32		mmio_base;
 	unsigned int irq_shift;
 	struct intel_ring *buffer;
+	struct intel_timeline *timeline;
+
+	struct intel_render_state *render_state;
 
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
@@ -177,7 +207,7 @@ struct intel_engine_cs {
 		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
 		bool irq_posted;
 
-		spinlock_t lock; /* protects the lists of requests */
+		spinlock_t lock; /* protects the lists of requests; irqsafe */
 		struct rb_root waiters; /* sorted by retirement, priority */
 		struct rb_root signals; /* sorted by retirement */
 		struct intel_wait *first_wait; /* oldest waiter by retirement */
@@ -225,7 +255,9 @@ struct intel_engine_cs {
 #define I915_DISPATCH_SECURE BIT(0)
 #define I915_DISPATCH_PINNED BIT(1)
 #define I915_DISPATCH_RS     BIT(2)
-	int		(*emit_request)(struct drm_i915_gem_request *req);
+	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
+					   u32 *out);
+	int		emit_breadcrumb_sz;
 
 	/* Pass the request to the hardware queue (e.g. directly into
 	 * the legacy ringbuffer or to the end of an execlist).
@@ -282,8 +314,6 @@ struct intel_engine_cs {
 	 *  ie. transpose of f(x, y)
 	 */
 	struct {
-		u32	sync_seqno[I915_NUM_ENGINES-1];
-
 		union {
 #define GEN6_SEMAPHORE_LAST	VECS_HW
 #define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
@@ -300,7 +330,7 @@ struct intel_engine_cs {
 		/* AKA wait() */
 		int	(*sync_to)(struct drm_i915_gem_request *req,
 				   struct drm_i915_gem_request *signal);
-		int	(*signal)(struct drm_i915_gem_request *req);
+		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *out);
 	} semaphore;
 
 	/* Execlists */
@@ -316,27 +346,6 @@ struct intel_engine_cs {
 	bool preempt_wa;
 	u32 ctx_desc_template;
 
-	/**
-	 * List of breadcrumbs associated with GPU requests currently
-	 * outstanding.
-	 */
-	struct list_head request_list;
-
-	/**
-	 * Seqno of request most recently submitted to request_list.
-	 * Used exclusively by hang checker to avoid grabbing lock while
-	 * inspecting request list.
-	 */
-	u32 last_submitted_seqno;
-	u32 last_pending_seqno;
-
-	/* An RCU guarded pointer to the last request. No reference is
-	 * held to the request, users must carefully acquire a reference to
-	 * the request using i915_gem_active_get_rcu(), or hold the
-	 * struct_mutex.
-	 */
-	struct i915_gem_active last_request;
-
 	struct i915_gem_context *last_context;
 
 	struct intel_engine_hangcheck hangcheck;
@@ -368,39 +377,12 @@ struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
-static inline bool
-intel_engine_initialized(const struct intel_engine_cs *engine)
-{
-	return engine->i915 != NULL;
-}
-
 static inline unsigned
 intel_engine_flag(const struct intel_engine_cs *engine)
 {
 	return 1 << engine->id;
 }
 
-static inline u32
-intel_engine_sync_index(struct intel_engine_cs *engine,
-			struct intel_engine_cs *other)
-{
-	int idx;
-
-	/*
-	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
-	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
-	 * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
-	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
-	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
-	 */
-
-	idx = (other - engine) - 1;
-	if (idx < 0)
-		idx += I915_NUM_ENGINES;
-
-	return idx;
-}
-
 static inline void
 intel_flush_status_page(struct intel_engine_cs *engine, int reg)
 {
@@ -483,30 +465,23 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 */
 }
 
-static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
 {
 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
-	return value & (ring->size - 1);
+	u32 offset = addr - ring->vaddr;
+	return offset & (ring->size - 1);
 }
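 
 /*
  * Example with assumed values: for a 4 KiB ring, a cursor 24 dwords past
  * ring->vaddr gives addr - ring->vaddr == 96, so the function returns 96.
  * A cursor that has advanced exactly ring->size bytes is masked back to
  * 0, honouring the warning above about never writing ring->size as a
  * tail value.
  */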
 
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ring *ring);
 
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
-static inline int intel_engine_idle(struct intel_engine_cs *engine,
-				    unsigned int flags)
-{
-	/* Wait upon the last request to be completed */
-	return i915_gem_active_wait_unlocked(&engine->last_request,
-					     flags, NULL, NULL);
-}
-
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
@@ -514,13 +489,30 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
 u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
 	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
+static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
+{
+	/* We are only peeking at the tail of the submit queue (and not the
+	 * queue itself) in order to gain a hint as to the current active
+	 * state of the engine. Callers are not expected to be taking
+	 * engine->timeline->lock, nor are they expected to be concerned
+	 * wtih serialising this hint with anything, so document it as
+	 * with serialising this hint with anything, so document it as
+	 */
+	return READ_ONCE(engine->timeline->last_submitted_seqno);
+}
+
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+			       struct intel_instdone *instdone);
+
 /*
  * Arbitrary size for largest possible 'add request' sequence. The code paths
  * are complex and variable. Empirical measurement shows that the worst case
@@ -586,12 +578,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
 
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-unsigned int intel_kick_waiters(struct drm_i915_private *i915);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915);
-
-static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
-{
-	return i915_gem_active_isset(&engine->last_request);
-}
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6c11168..0599408 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -288,7 +288,6 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
-	struct drm_device *dev = &dev_priv->drm;
 
 	/*
 	 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -304,7 +303,7 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
 	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
-	if (IS_BROADWELL(dev))
+	if (IS_BROADWELL(dev_priv))
 		gen8_irq_power_well_post_enable(dev_priv,
 						1 << PIPE_C | 1 << PIPE_B);
 }
@@ -331,7 +330,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 	 * sure vgacon can keep working normally without triggering interrupts
 	 * and error messages.
 	 */
-	if (power_well->data == SKL_DISP_PW_2) {
+	if (power_well->id == SKL_DISP_PW_2) {
 		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
 		vga_put(pdev, VGA_RSRC_LEGACY_IO);
@@ -344,7 +343,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
-	if (power_well->data == SKL_DISP_PW_2)
+	if (power_well->id == SKL_DISP_PW_2)
 		gen8_irq_power_well_pre_disable(dev_priv,
 						1 << PIPE_C | 1 << PIPE_B);
 }
@@ -659,7 +658,7 @@ static void
 gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
 				  struct i915_power_well *power_well)
 {
-	enum skl_disp_power_wells power_well_id = power_well->data;
+	enum skl_disp_power_wells power_well_id = power_well->id;
 	u32 val;
 	u32 mask;
 
@@ -704,7 +703,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
 	fuse_status = I915_READ(SKL_FUSE_STATUS);
 
-	switch (power_well->data) {
+	switch (power_well->id) {
 	case SKL_DISP_PW_1:
 		if (intel_wait_for_register(dev_priv,
 					    SKL_FUSE_STATUS,
@@ -728,13 +727,13 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 	case SKL_DISP_PW_MISC_IO:
 		break;
 	default:
-		WARN(1, "Unknown power well %lu\n", power_well->data);
+		WARN(1, "Unknown power well %lu\n", power_well->id);
 		return;
 	}
 
-	req_mask = SKL_POWER_WELL_REQ(power_well->data);
+	req_mask = SKL_POWER_WELL_REQ(power_well->id);
 	enable_requested = tmp & req_mask;
-	state_mask = SKL_POWER_WELL_STATE(power_well->data);
+	state_mask = SKL_POWER_WELL_STATE(power_well->id);
 	is_enabled = tmp & state_mask;
 
 	if (!enable && enable_requested)
@@ -770,14 +769,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 			  power_well->name, enable ? "enable" : "disable");
 
 	if (check_fuse_status) {
-		if (power_well->data == SKL_DISP_PW_1) {
+		if (power_well->id == SKL_DISP_PW_1) {
 			if (intel_wait_for_register(dev_priv,
 						    SKL_FUSE_STATUS,
 						    SKL_FUSE_PG1_DIST_STATUS,
 						    SKL_FUSE_PG1_DIST_STATUS,
 						    1))
 				DRM_ERROR("PG1 distributing status timeout\n");
-		} else if (power_well->data == SKL_DISP_PW_2) {
+		} else if (power_well->id == SKL_DISP_PW_2) {
 			if (intel_wait_for_register(dev_priv,
 						    SKL_FUSE_STATUS,
 						    SKL_FUSE_PG2_DIST_STATUS,
@@ -819,8 +818,8 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
-	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
-		SKL_POWER_WELL_STATE(power_well->data);
+	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
+		SKL_POWER_WELL_STATE(power_well->id);
 
 	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
 }
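 
 /*
  * Reading of the predicate above: a well counts as enabled only when both
  * its request bit (the driver has asked for the well) and its state bit
  * (the hardware reports it powered) are set in HSW_PWR_WELL_DRIVER; a
  * well that is requested but still ramping up reports false.
  */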
@@ -846,45 +845,22 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
 	skl_set_power_well(dev_priv, power_well, false);
 }
 
-static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
-{
-	enum skl_disp_power_wells power_well_id = power_well->data;
-
-	return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
-}
-
 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	enum skl_disp_power_wells power_well_id = power_well->data;
-	struct i915_power_well *cmn_a_well = NULL;
-
-	if (power_well_id == BXT_DPIO_CMN_BC) {
-		/*
-		 * We need to copy the GRC calibration value from the eDP PHY,
-		 * so make sure it's powered up.
-		 */
-		cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
-		intel_power_well_get(dev_priv, cmn_a_well);
-	}
-
-	bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
-
-	if (cmn_a_well)
-		intel_power_well_put(dev_priv, cmn_a_well);
+	bxt_ddi_phy_init(dev_priv, power_well->data);
 }
 
 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 					    struct i915_power_well *power_well)
 {
-	bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
+	bxt_ddi_phy_uninit(dev_priv, power_well->data);
 }
 
 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
 					    struct i915_power_well *power_well)
 {
-	return bxt_ddi_phy_is_enabled(dev_priv,
-				      bxt_power_well_to_phy(power_well));
+	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
 }
 
 static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -903,13 +879,11 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
 
 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
 	if (power_well->count > 0)
-		bxt_ddi_phy_verify_state(dev_priv,
-					 bxt_power_well_to_phy(power_well));
+		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
 
 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
 	if (power_well->count > 0)
-		bxt_ddi_phy_verify_state(dev_priv,
-					 bxt_power_well_to_phy(power_well));
+		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
 }
 
 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -933,7 +907,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
 	WARN_ON(dev_priv->cdclk_freq !=
-		dev_priv->display.get_display_clock_speed(&dev_priv->drm));
+		dev_priv->display.get_display_clock_speed(dev_priv));
 
 	gen9_assert_dbuf_enabled(dev_priv);
 
@@ -976,7 +950,7 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 			       struct i915_power_well *power_well, bool enable)
 {
-	enum punit_power_well power_well_id = power_well->data;
+	enum punit_power_well power_well_id = power_well->id;
 	u32 mask;
 	u32 state;
 	u32 ctrl;
@@ -1030,7 +1004,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 				   struct i915_power_well *power_well)
 {
-	int power_well_id = power_well->data;
+	int power_well_id = power_well->id;
 	bool enabled = false;
 	u32 mask;
 	u32 state;
@@ -1139,13 +1113,15 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 
 	intel_power_sequencer_reset(dev_priv);
 
-	intel_hpd_poll_init(dev_priv);
+	/* Prevent us from re-enabling polling by accident in late suspend */
+	if (!dev_priv->drm.dev->power.is_suspended)
+		intel_hpd_poll_init(dev_priv);
 }
 
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 					  struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
 
 	vlv_set_power_well(dev_priv, power_well, true);
 
@@ -1155,7 +1131,7 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
 
 	vlv_display_power_well_deinit(dev_priv);
 
@@ -1165,7 +1141,7 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
 	/* since ref/cri clock was enabled */
 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1191,7 +1167,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 {
 	enum pipe pipe;
 
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
 	for_each_pipe(dev_priv, pipe)
 		assert_pll_disabled(dev_priv, pipe);
@@ -1214,7 +1190,7 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
 		struct i915_power_well *power_well;
 
 		power_well = &power_domains->power_wells[i];
-		if (power_well->data == power_well_id)
+		if (power_well->id == power_well_id)
 			return power_well;
 	}
 
@@ -1338,10 +1314,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	enum pipe pipe;
 	uint32_t tmp;
 
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
-		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
 
-	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
 		pipe = PIPE_A;
 		phy = DPIO_PHY0;
 	} else {
@@ -1369,7 +1345,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
 
-	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
@@ -1400,10 +1376,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 {
 	enum dpio_phy phy;
 
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
-		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
 
-	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
 		phy = DPIO_PHY0;
 		assert_pll_disabled(dev_priv, PIPE_A);
 		assert_pll_disabled(dev_priv, PIPE_B);
@@ -1552,7 +1528,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
-	enum pipe pipe = power_well->data;
+	enum pipe pipe = power_well->id;
 	bool enabled;
 	u32 state, ctrl;
 
@@ -1582,7 +1558,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 				    struct i915_power_well *power_well,
 				    bool enable)
 {
-	enum pipe pipe = power_well->data;
+	enum pipe pipe = power_well->id;
 	u32 state;
 	u32 ctrl;
 
@@ -1615,7 +1591,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A);
+	WARN_ON_ONCE(power_well->id != PIPE_A);
 
 	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
 }
@@ -1623,7 +1599,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A);
+	WARN_ON_ONCE(power_well->id != PIPE_A);
 
 	chv_set_pipe_power_well(dev_priv, power_well, true);
 
@@ -1633,7 +1609,7 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A);
+	WARN_ON_ONCE(power_well->id != PIPE_A);
 
 	vlv_display_power_well_deinit(dev_priv);
 
@@ -1977,12 +1953,12 @@ static struct i915_power_well vlv_power_wells[] = {
 		.always_on = 1,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
-		.data = PUNIT_POWER_WELL_ALWAYS_ON,
+		.id = PUNIT_POWER_WELL_ALWAYS_ON,
 	},
 	{
 		.name = "display",
 		.domains = VLV_DISPLAY_POWER_DOMAINS,
-		.data = PUNIT_POWER_WELL_DISP2D,
+		.id = PUNIT_POWER_WELL_DISP2D,
 		.ops = &vlv_display_power_well_ops,
 	},
 	{
@@ -1992,7 +1968,7 @@ static struct i915_power_well vlv_power_wells[] = {
 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
 		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
 	},
 	{
 		.name = "dpio-tx-b-23",
@@ -2001,7 +1977,7 @@ static struct i915_power_well vlv_power_wells[] = {
 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
 		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
 	},
 	{
 		.name = "dpio-tx-c-01",
@@ -2010,7 +1986,7 @@ static struct i915_power_well vlv_power_wells[] = {
 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
 		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
 	},
 	{
 		.name = "dpio-tx-c-23",
@@ -2019,12 +1995,12 @@ static struct i915_power_well vlv_power_wells[] = {
 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
 		.ops = &vlv_dpio_power_well_ops,
-		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
 	},
 	{
 		.name = "dpio-common",
 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
-		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
 		.ops = &vlv_dpio_cmn_power_well_ops,
 	},
 };
@@ -2044,19 +2020,19 @@ static struct i915_power_well chv_power_wells[] = {
 		 * required for any pipe to work.
 		 */
 		.domains = CHV_DISPLAY_POWER_DOMAINS,
-		.data = PIPE_A,
+		.id = PIPE_A,
 		.ops = &chv_pipe_power_well_ops,
 	},
 	{
 		.name = "dpio-common-bc",
 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
-		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
 		.ops = &chv_dpio_cmn_power_well_ops,
 	},
 	{
 		.name = "dpio-common-d",
 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
-		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
+		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
 		.ops = &chv_dpio_cmn_power_well_ops,
 	},
 };
@@ -2079,57 +2055,57 @@ static struct i915_power_well skl_power_wells[] = {
 		.always_on = 1,
 		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
-		.data = SKL_DISP_PW_ALWAYS_ON,
+		.id = SKL_DISP_PW_ALWAYS_ON,
 	},
 	{
 		.name = "power well 1",
 		/* Handled by the DMC firmware */
 		.domains = 0,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_1,
+		.id = SKL_DISP_PW_1,
 	},
 	{
 		.name = "MISC IO power well",
 		/* Handled by the DMC firmware */
 		.domains = 0,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_MISC_IO,
+		.id = SKL_DISP_PW_MISC_IO,
 	},
 	{
 		.name = "DC off",
 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
 		.ops = &gen9_dc_off_power_well_ops,
-		.data = SKL_DISP_PW_DC_OFF,
+		.id = SKL_DISP_PW_DC_OFF,
 	},
 	{
 		.name = "power well 2",
 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_2,
+		.id = SKL_DISP_PW_2,
 	},
 	{
 		.name = "DDI A/E power well",
 		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_DDI_A_E,
+		.id = SKL_DISP_PW_DDI_A_E,
 	},
 	{
 		.name = "DDI B power well",
 		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_DDI_B,
+		.id = SKL_DISP_PW_DDI_B,
 	},
 	{
 		.name = "DDI C power well",
 		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_DDI_C,
+		.id = SKL_DISP_PW_DDI_C,
 	},
 	{
 		.name = "DDI D power well",
 		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_DDI_D,
+		.id = SKL_DISP_PW_DDI_D,
 	},
 };
 
@@ -2144,31 +2120,33 @@ static struct i915_power_well bxt_power_wells[] = {
 		.name = "power well 1",
 		.domains = 0,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_1,
+		.id = SKL_DISP_PW_1,
 	},
 	{
 		.name = "DC off",
 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
 		.ops = &gen9_dc_off_power_well_ops,
-		.data = SKL_DISP_PW_DC_OFF,
+		.id = SKL_DISP_PW_DC_OFF,
 	},
 	{
 		.name = "power well 2",
 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
 		.ops = &skl_power_well_ops,
-		.data = SKL_DISP_PW_2,
+		.id = SKL_DISP_PW_2,
 	},
 	{
 		.name = "dpio-common-a",
 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
 		.ops = &bxt_dpio_cmn_power_well_ops,
-		.data = BXT_DPIO_CMN_A,
+		.id = BXT_DPIO_CMN_A,
+		.data = DPIO_PHY1,
 	},
 	{
 		.name = "dpio-common-bc",
 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
 		.ops = &bxt_dpio_cmn_power_well_ops,
-		.data = BXT_DPIO_CMN_BC,
+		.id = BXT_DPIO_CMN_BC,
+		.data = DPIO_PHY0,
 	},
 };
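 
 /*
  * Note on the .id/.data split above: power_well->id now carries the
  * platform identifier used by lookup_power_well() and the REQ/STATE
  * register masks, freeing ->data for per-well payload, here the DPIO PHY
  * that each BXT common-lane well controls:
  *
  *	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
  *	bxt_ddi_phy_init(dev_priv, power_well->data);
  *
  * where ->data evaluates to DPIO_PHY1 for the CMN_A well, which is what
  * lets bxt_power_well_to_phy() be deleted.
  */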
 
@@ -2590,20 +2568,19 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
 	power_domains->initializing = true;
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		skl_display_core_init(dev_priv, resume);
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		bxt_display_core_init(dev_priv, resume);
-	} else if (IS_CHERRYVIEW(dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		mutex_lock(&power_domains->lock);
 		chv_phy_control_init(dev_priv);
 		mutex_unlock(&power_domains->lock);
-	} else if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		mutex_lock(&power_domains->lock);
 		vlv_cmnlane_wa(dev_priv);
 		mutex_unlock(&power_domains->lock);
@@ -2738,8 +2715,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 	struct device *kdev = &pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
-	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
-		atomic_inc(&dev_priv->pm.atomic_seq);
+	atomic_dec(&dev_priv->pm.wakeref_count);
 
 	pm_runtime_mark_last_busy(kdev);
 	pm_runtime_put_autosuspend(kdev);
@@ -2758,7 +2734,6 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
-	struct drm_device *dev = &dev_priv->drm;
 	struct device *kdev = &pdev->dev;
 
 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
@@ -2770,7 +2745,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 	 * so the driver's own RPM reference tracking asserts also work on
 	 * platforms without RPM support.
 	 */
-	if (!HAS_RUNTIME_PM(dev)) {
+	if (!HAS_RUNTIME_PM(dev_priv)) {
 		pm_runtime_dont_use_autosuspend(kdev);
 		pm_runtime_get_sync(kdev);
 	} else {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c551024..3990c80 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -251,7 +251,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 		 * HW workaround: we need to write this twice because of an
 		 * issue that may result in the first write getting masked.
 		 */
-		if (HAS_PCH_IBX(dev)) {
+		if (HAS_PCH_IBX(dev_priv)) {
 			I915_WRITE(intel_sdvo->sdvo_reg, val);
 			POSTING_READ(intel_sdvo->sdvo_reg);
 		}
@@ -307,7 +307,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
 static const struct _sdvo_cmd_name {
 	u8 cmd;
 	const char *name;
-} sdvo_cmd_names[] = {
+} __attribute__ ((packed)) sdvo_cmd_names[] = {
 	SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
 	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
 	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
@@ -1133,7 +1133,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 	DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
 	pipe_config->pipe_bpp = 8*3;
 
-	if (HAS_PCH_SPLIT(encoder->base.dev))
+	if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
 		pipe_config->has_pch_encoder = true;
 
 	/* We need to construct preferred input timings based on our
@@ -1273,7 +1273,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 		/* The real mode polarity is set by the SDVO commands, using
 		 * struct intel_sdvo_dtd. */
 		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-		if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
+		if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
 			sdvox |= HDMI_COLOR_RANGE_16_235;
 		if (INTEL_INFO(dev)->gen < 5)
 			sdvox |= SDVO_BORDER_ENABLE;
@@ -1286,7 +1286,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
 	}
 
-	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT)
 		sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
 	else
 		sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1296,7 +1296,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		/* done in crtc_mode_set as the dpll_md reg must be written early */
-	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+		   IS_G33(dev_priv)) {
 		/* done in crtc_mode_set as it lives inside the dpll register */
 	} else {
 		sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -1339,7 +1340,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
 	if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
 		return false;
 
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_CPT(dev_priv))
 		*pipe = PORT_TO_PIPE_CPT(tmp);
 	else
 		*pipe = PORT_TO_PIPE(tmp);
@@ -1389,7 +1390,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 	 * encoder->get_config, so we already have a valid pixel multiplier on
 	 * all other platforms.
 	 */
-	if (IS_I915G(dev) || IS_I915GM(dev)) {
+	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
 		pipe_config->pixel_multiplier =
 			((sdvox & SDVO_PORT_MULTIPLY_MASK)
 			 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1471,7 +1472,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
 		temp &= ~SDVO_ENABLE;
 		intel_sdvo_write_sdvox(intel_sdvo, temp);
 
-		intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
@@ -1508,7 +1509,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
 	intel_sdvo_write_sdvox(intel_sdvo, temp);
 
 	for (i = 0; i < 2; i++)
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
+		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
 
 	success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
 	/* Warn if the device reported failure to sync.
@@ -1595,15 +1596,15 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
 
 static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
 {
-	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
 	uint16_t hotplug;
 
-	if (!I915_HAS_HOTPLUG(dev))
+	if (!I915_HAS_HOTPLUG(dev_priv))
 		return 0;
 
 	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
 	 * on the line. */
-	if (IS_I945G(dev) || IS_I945GM(dev))
+	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
 		return 0;
 
 	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
@@ -2410,10 +2411,10 @@ static void
 intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
 			       struct intel_sdvo_connector *connector)
 {
-	struct drm_device *dev = connector->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
 
 	intel_attach_force_audio_property(&connector->base.base);
-	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+	if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
 		intel_attach_broadcast_rgb_property(&connector->base.base);
 		intel_sdvo->color_range_auto = true;
 	}
@@ -2981,6 +2982,7 @@ bool intel_sdvo_init(struct drm_device *dev,
 	/* encoder type will be decided later */
 	intel_encoder = &intel_sdvo->base;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
+	intel_encoder->port = port;
 	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
 			 "SDVO %c", port_name(port));
 
@@ -2996,7 +2998,7 @@ bool intel_sdvo_init(struct drm_device *dev,
 	}
 
 	intel_encoder->compute_config = intel_sdvo_compute_config;
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		intel_encoder->disable = pch_disable_sdvo;
 		intel_encoder->post_disable = pch_post_disable_sdvo;
 	} else {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 73a521f..5e4eb7c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -208,6 +208,8 @@ skl_update_plane(struct drm_plane *drm_plane,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
+	const struct skl_plane_wm *p_wm =
+		&crtc_state->wm.skl.optimal.planes[plane];
 	u32 plane_ctl;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 surf_addr = plane_state->main.offset;
@@ -232,7 +234,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
 	if (wm->dirty_pipes & drm_crtc_mask(crtc))
-		skl_write_plane_wm(intel_crtc, wm, plane);
+		skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, plane);
 
 	if (key->flags) {
 		I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
@@ -289,6 +291,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
 
@@ -298,7 +301,8 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 	 */
 	if (!dplane->state->visible)
 		skl_write_plane_wm(to_intel_crtc(crtc),
-				   &dev_priv->wm.skl_results, plane);
+				   &cstate->wm.skl.optimal.planes[plane],
+				   &dev_priv->wm.skl_results.ddb, plane);
 
 	I915_WRITE(PLANE_CTL(pipe, plane), 0);
 
@@ -358,7 +362,7 @@ vlv_update_plane(struct drm_plane *dplane,
 	int plane = intel_plane->plane;
 	u32 sprctl;
 	u32 sprsurf_offset, linear_offset;
-	unsigned int rotation = dplane->state->rotation;
+	unsigned int rotation = plane_state->base.rotation;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	int crtc_x = plane_state->base.dst.x1;
 	int crtc_y = plane_state->base.dst.y1;
@@ -450,7 +454,7 @@ vlv_update_plane(struct drm_plane *dplane,
 	if (key->flags & I915_SET_COLORKEY_SOURCE)
 		sprctl |= SP_SOURCE_KEY;
 
-	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
 		chv_update_csc(intel_plane, fb->pixel_format);
 
 	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
@@ -542,12 +546,12 @@ ivb_update_plane(struct drm_plane *plane,
 	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		sprctl |= SPRITE_TILED;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
 	else
 		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
 	/* Sizes are 0 based */
@@ -566,7 +570,7 @@ ivb_update_plane(struct drm_plane *plane,
 		sprctl |= SPRITE_ROTATE_180;
 
 		/* HSW and BDW do this automagically in hardware */
-		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+		if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
 			x += src_w;
 			y += src_h;
 		}
@@ -590,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
 
 	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
 	 * register */
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
 	else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
@@ -680,7 +684,7 @@ ilk_update_plane(struct drm_plane *plane,
 	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		dvscntr |= DVS_TILED;
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev_priv))
 		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
 
 	/* Sizes are 0 based */
@@ -753,7 +757,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
 			 struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *state)
 {
-	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = to_i915(plane->dev);
 	struct drm_crtc *crtc = state->base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -797,7 +801,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	}
 
 	/* setup can_scale, min_scale, max_scale */
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		/* use scaler when colorkey is not required */
 		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
 			can_scale = 1;
@@ -913,7 +917,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
 
 		width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
 
-		if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
+		if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
 		    width_bytes > 4096 || fb->pitches[0] > 4096)) {
 			DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
 			return -EINVAL;
@@ -932,7 +936,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	dst->y1 = crtc_y;
 	dst->y2 = crtc_y + crtc_h;
 
-	if (INTEL_GEN(dev) >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		ret = skl_check_plane_surface(state);
 		if (ret)
 			return ret;
@@ -944,6 +948,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_intel_sprite_colorkey *set = data;
 	struct drm_plane *plane;
 	struct drm_plane_state *plane_state;
@@ -955,7 +960,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
 		return -EINVAL;
 
-	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	    set->flags & I915_SET_COLORKEY_DESTINATION)
 		return -EINVAL;
 
@@ -987,9 +992,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 		drm_modeset_backoff(&ctx);
 	}
 
-	if (ret)
-		drm_atomic_state_free(state);
-
+	drm_atomic_state_put(state);
 out:
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
@@ -1039,19 +1042,18 @@ static uint32_t skl_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
-int
-intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, int plane)
 {
 	struct intel_plane *intel_plane = NULL;
 	struct intel_plane_state *state = NULL;
 	unsigned long possible_crtcs;
 	const uint32_t *plane_formats;
+	unsigned int supported_rotations;
 	int num_plane_formats;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen < 5)
-		return -ENODEV;
-
 	intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
 	if (!intel_plane) {
 		ret = -ENOMEM;
@@ -1065,26 +1067,26 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 	}
 	intel_plane->base.state = &state->base;
 
-	switch (INTEL_INFO(dev)->gen) {
-	case 5:
-	case 6:
+	if (INTEL_GEN(dev_priv) >= 9) {
 		intel_plane->can_scale = true;
-		intel_plane->max_downscale = 16;
-		intel_plane->update_plane = ilk_update_plane;
-		intel_plane->disable_plane = ilk_disable_plane;
+		state->scaler_id = -1;
 
-		if (IS_GEN6(dev)) {
-			plane_formats = snb_plane_formats;
-			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
-		} else {
-			plane_formats = ilk_plane_formats;
-			num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
-		}
-		break;
+		intel_plane->update_plane = skl_update_plane;
+		intel_plane->disable_plane = skl_disable_plane;
 
-	case 7:
-	case 8:
-		if (IS_IVYBRIDGE(dev)) {
+		plane_formats = skl_plane_formats;
+		num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		intel_plane->can_scale = false;
+		intel_plane->max_downscale = 1;
+
+		intel_plane->update_plane = vlv_update_plane;
+		intel_plane->disable_plane = vlv_disable_plane;
+
+		plane_formats = vlv_plane_formats;
+		num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+	} else if (INTEL_GEN(dev_priv) >= 7) {
+		if (IS_IVYBRIDGE(dev_priv)) {
 			intel_plane->can_scale = true;
 			intel_plane->max_downscale = 2;
 		} else {
@@ -1092,33 +1094,34 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 			intel_plane->max_downscale = 1;
 		}
 
-		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-			intel_plane->update_plane = vlv_update_plane;
-			intel_plane->disable_plane = vlv_disable_plane;
+		intel_plane->update_plane = ivb_update_plane;
+		intel_plane->disable_plane = ivb_disable_plane;
 
-			plane_formats = vlv_plane_formats;
-			num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
-		} else {
-			intel_plane->update_plane = ivb_update_plane;
-			intel_plane->disable_plane = ivb_disable_plane;
+		plane_formats = snb_plane_formats;
+		num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+	} else {
+		intel_plane->can_scale = true;
+		intel_plane->max_downscale = 16;
 
+		intel_plane->update_plane = ilk_update_plane;
+		intel_plane->disable_plane = ilk_disable_plane;
+
+		if (IS_GEN6(dev_priv)) {
 			plane_formats = snb_plane_formats;
 			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+		} else {
+			plane_formats = ilk_plane_formats;
+			num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
 		}
-		break;
-	case 9:
-		intel_plane->can_scale = true;
-		intel_plane->update_plane = skl_update_plane;
-		intel_plane->disable_plane = skl_disable_plane;
-		state->scaler_id = -1;
+	}
 
-		plane_formats = skl_plane_formats;
-		num_plane_formats = ARRAY_SIZE(skl_plane_formats);
-		break;
-	default:
-		MISSING_CASE(INTEL_INFO(dev)->gen);
-		ret = -ENODEV;
-		goto fail;
+	if (INTEL_GEN(dev_priv) >= 9) {
+		supported_rotations =
+			DRM_ROTATE_0 | DRM_ROTATE_90 |
+			DRM_ROTATE_180 | DRM_ROTATE_270;
+	} else {
+		supported_rotations =
+			DRM_ROTATE_0 | DRM_ROTATE_180;
 	}
 
 	intel_plane->pipe = pipe;
@@ -1128,30 +1131,32 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 
 	possible_crtcs = (1 << pipe);
 
-	if (INTEL_INFO(dev)->gen >= 9)
-		ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
-					       &intel_plane_funcs,
+	if (INTEL_GEN(dev_priv) >= 9)
+		ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+					       possible_crtcs, &intel_plane_funcs,
 					       plane_formats, num_plane_formats,
 					       DRM_PLANE_TYPE_OVERLAY,
 					       "plane %d%c", plane + 2, pipe_name(pipe));
 	else
-		ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
-					       &intel_plane_funcs,
+		ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+					       possible_crtcs, &intel_plane_funcs,
 					       plane_formats, num_plane_formats,
 					       DRM_PLANE_TYPE_OVERLAY,
 					       "sprite %c", sprite_name(pipe, plane));
 	if (ret)
 		goto fail;
 
-	intel_create_rotation_property(dev, intel_plane);
+	drm_plane_create_rotation_property(&intel_plane->base,
+					   DRM_ROTATE_0,
+					   supported_rotations);
 
 	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 
-	return 0;
+	return intel_plane;
 
 fail:
 	kfree(state);
 	kfree(intel_plane);
 
-	return ret;
+	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d960e48..9212f00 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -86,7 +86,8 @@ struct intel_tv {
 };
 
 struct video_levels {
-	int blank, black, burst;
+	u16 blank, black;
+	u8 burst;
 };
 
 struct color_conversion {
@@ -339,34 +340,43 @@ static const struct video_levels component_levels = {
 
 struct tv_mode {
 	const char *name;
-	int clock;
-	int refresh; /* in millihertz (for precision) */
+
+	u32 clock;
+	u16 refresh; /* in millihertz (for precision) */
 	u32 oversample;
-	int hsync_end, hblank_start, hblank_end, htotal;
-	bool progressive, trilevel_sync, component_only;
-	int vsync_start_f1, vsync_start_f2, vsync_len;
-	bool veq_ena;
-	int veq_start_f1, veq_start_f2, veq_len;
-	int vi_end_f1, vi_end_f2, nbr_end;
-	bool burst_ena;
-	int hburst_start, hburst_len;
-	int vburst_start_f1, vburst_end_f1;
-	int vburst_start_f2, vburst_end_f2;
-	int vburst_start_f3, vburst_end_f3;
-	int vburst_start_f4, vburst_end_f4;
+	u8 hsync_end;
+	u16 hblank_start, hblank_end, htotal;
+	bool progressive : 1, trilevel_sync : 1, component_only : 1;
+	u8 vsync_start_f1, vsync_start_f2, vsync_len;
+	bool veq_ena : 1;
+	u8 veq_start_f1, veq_start_f2, veq_len;
+	u8 vi_end_f1, vi_end_f2;
+	u16 nbr_end;
+	bool burst_ena : 1;
+	u8 hburst_start, hburst_len;
+	u8 vburst_start_f1;
+	u16 vburst_end_f1;
+	u8 vburst_start_f2;
+	u16 vburst_end_f2;
+	u8 vburst_start_f3;
+	u16 vburst_end_f3;
+	u8 vburst_start_f4;
+	u16 vburst_end_f4;
 	/*
 	 * subcarrier programming
 	 */
-	int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+	u16 dda2_size, dda3_size;
+	u8 dda1_inc;
+	u16 dda2_inc, dda3_inc;
 	u32 sc_reset;
-	bool pal_burst;
+	bool pal_burst : 1;
 	/*
 	 * blank/black levels
 	 */
 	const struct video_levels *composite_levels, *svideo_levels;
 	const struct color_conversion *composite_color, *svideo_color;
 	const u32 *filter_table;
-	int max_srcw;
+	u16 max_srcw;
 };
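 
 /*
  * The narrowed integer types and one-bit bool bitfields above shrink the
  * static tv_mode tables; the exact saving is ABI-dependent (an assumption,
  * not measured here), but "bool progressive : 1, trilevel_sync : 1,
  * component_only : 1" can pack three flags into a single byte where three
  * plain bools previously took three.
  */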
 
 
@@ -846,7 +856,7 @@ intel_enable_tv(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
-	intel_wait_for_vblank(encoder->base.dev,
+	intel_wait_for_vblank(dev_priv,
 			      to_intel_crtc(encoder->base.crtc)->pipe);
 
 	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
@@ -1095,7 +1105,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
 		tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
 
 	/* Enable two fixes for the chips that need them. */
-	if (IS_I915GM(dev))
+	if (IS_I915GM(dev_priv))
 		tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
 
 	set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
@@ -1220,7 +1230,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 	 * The TV sense state should be cleared to zero on the Cantiga platform,
 	 * otherwise the TV is misdetected. This is a hardware requirement.
 	 */
-	if (IS_GM45(dev))
+	if (IS_GM45(dev_priv))
 		tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
 			    TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
 
@@ -1228,7 +1238,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 	I915_WRITE(TV_DAC, tv_dac);
 	POSTING_READ(TV_DAC);
 
-	intel_wait_for_vblank(dev, intel_crtc->pipe);
+	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
 
 	type = -1;
 	tv_dac = I915_READ(TV_DAC);
@@ -1258,7 +1268,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 	POSTING_READ(TV_CTL);
 
 	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
-	intel_wait_for_vblank(dev, intel_crtc->pipe);
+	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
 
 	/* Restore interrupt config */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1610,7 +1620,9 @@ intel_tv_init(struct drm_device *dev)
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
+
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
+	intel_encoder->port = PORT_NONE;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	intel_encoder->cloneable = 0;
 	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ee2306a..e2b188d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,19 +231,21 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 {
 	struct intel_uncore_forcewake_domain *domain =
 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
+	struct drm_i915_private *dev_priv = domain->i915;
 	unsigned long irqflags;
 
-	assert_rpm_device_not_suspended(domain->i915);
+	assert_rpm_device_not_suspended(dev_priv);
 
-	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 	if (WARN_ON(domain->wake_count == 0))
 		domain->wake_count++;
 
-	if (--domain->wake_count == 0)
-		domain->i915->uncore.funcs.force_wake_put(domain->i915,
-							  1 << domain->id);
+	if (--domain->wake_count == 0) {
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+		dev_priv->uncore.fw_domains_active &= ~domain->mask;
+	}
 
-	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
 	return HRTIMER_NORESTART;
 }
@@ -254,7 +256,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
 	unsigned long irqflags;
 	struct intel_uncore_forcewake_domain *domain;
 	int retry_count = 100;
-	enum forcewake_domains fw = 0, active_domains;
+	enum forcewake_domains fw, active_domains;
 
 	/* Hold uncore.lock across reset to prevent any register access
 	 * with forcewake not set correctly. Wait until all pending
@@ -291,10 +293,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
 
 	WARN_ON(active_domains);
 
-	for_each_fw_domain(domain, dev_priv)
-		if (domain->wake_count)
-			fw |= domain->mask;
-
+	fw = dev_priv->uncore.fw_domains_active;
 	if (fw)
 		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
 
@@ -443,9 +442,6 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 {
 	struct intel_uncore_forcewake_domain *domain;
 
-	if (!dev_priv->uncore.funcs.force_wake_get)
-		return;
-
 	fw_domains &= dev_priv->uncore.fw_domains;
 
 	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -453,8 +449,10 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 			fw_domains &= ~domain->mask;
 	}
 
-	if (fw_domains)
+	if (fw_domains) {
 		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+		dev_priv->uncore.fw_domains_active |= fw_domains;
+	}
 }
 
 /**
@@ -509,9 +507,6 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 {
 	struct intel_uncore_forcewake_domain *domain;
 
-	if (!dev_priv->uncore.funcs.force_wake_put)
-		return;
-
 	fw_domains &= dev_priv->uncore.fw_domains;
 
 	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -567,13 +562,10 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 {
-	struct intel_uncore_forcewake_domain *domain;
-
 	if (!dev_priv->uncore.funcs.force_wake_get)
 		return;
 
-	for_each_fw_domain(domain, dev_priv)
-		WARN_ON(domain->wake_count);
+	WARN_ON(dev_priv->uncore.fw_domains_active);
 }
 
 /* We give fast paths for the really cool registers */
@@ -589,49 +581,146 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 	__fwd; \
 })
 
-#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
+static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
+{
+	if (offset < entry->start)
+		return -1;
+	else if (offset > entry->end)
+		return 1;
+	else
+		return 0;
+}
 
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x2000, 0x4000) || \
-	 REG_RANGE((reg), 0x5000, 0x8000) || \
-	 REG_RANGE((reg), 0xB000, 0x12000) || \
-	 REG_RANGE((reg), 0x2E000, 0x30000))
+/* Copied and "macroized" from lib/bsearch.c */
+#define BSEARCH(key, base, num, cmp) ({                                 \
+	unsigned int start__ = 0, end__ = (num);                        \
+	typeof(base) result__ = NULL;                                   \
+	while (start__ < end__) {                                       \
+		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
+		int ret__ = (cmp)((key), (base) + mid__);               \
+		if (ret__ < 0) {                                        \
+			end__ = mid__;                                  \
+		} else if (ret__ > 0) {                                 \
+			start__ = mid__ + 1;                            \
+		} else {                                                \
+			result__ = (base) + mid__;                      \
+			break;                                          \
+		}                                                       \
+	}                                                               \
+	result__;                                                       \
+})
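
BSEARCH behaves like bsearch(3) but takes its comparator as a macro
argument, so the compiler can inline it instead of emitting an indirect
call on every probe. An illustrative stand-alone use against a sorted
u32 array (all names here are hypothetical); note that, as in the
is_gen8_shadowed() change below, the base must be passed as a pointer
rather than an array so typeof(base) yields a pointer type:

	static int u32_cmp(u32 key, const u32 *elem)
	{
		if (key < *elem)
			return -1;
		else if (key > *elem)
			return 1;
		else
			return 0;
	}

	static const u32 sorted_keys[] = { 1, 4, 9, 16 };
	const u32 *base = sorted_keys;	/* pointer, not array */
	const u32 *hit = BSEARCH(9, base, ARRAY_SIZE(sorted_keys), u32_cmp);
	/* hit points at sorted_keys[2]; a missing key yields NULL */
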
 
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x12000, 0x14000) || \
-	 REG_RANGE((reg), 0x22000, 0x24000) || \
-	 REG_RANGE((reg), 0x30000, 0x40000))
+static enum forcewake_domains
+find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+{
+	const struct intel_forcewake_range *entry;
 
-#define __vlv_reg_read_fw_domains(offset) \
+	entry = BSEARCH(offset,
+			dev_priv->uncore.fw_domains_table,
+			dev_priv->uncore.fw_domains_table_entries,
+			fw_range_cmp);
+
+	return entry ? entry->domains : 0;
+}
+
+static void
+intel_fw_table_check(struct drm_i915_private *dev_priv)
+{
+	const struct intel_forcewake_range *ranges;
+	unsigned int num_ranges;
+	s32 prev;
+	unsigned int i;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+		return;
+
+	ranges = dev_priv->uncore.fw_domains_table;
+	if (!ranges)
+		return;
+
+	num_ranges = dev_priv->uncore.fw_domains_table_entries;
+
+	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+		WARN_ON_ONCE(prev >= (s32)ranges->start);
+		prev = ranges->start;
+		WARN_ON_ONCE(prev >= (s32)ranges->end);
+		prev = ranges->end;
+	}
+}
+
+#define GEN_FW_RANGE(s, e, d) \
+	{ .start = (s), .end = (e), .domains = (d) }
+
+#define HAS_FWTABLE(dev_priv) \
+	(IS_GEN9(dev_priv) || \
+	 IS_CHERRYVIEW(dev_priv) || \
+	 IS_VALLEYVIEW(dev_priv))
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __vlv_fw_ranges[] = {
+	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
+};
+
+#define __fwtable_reg_read_fw_domains(offset) \
 ({ \
 	enum forcewake_domains __fwd = 0; \
-	if (!NEEDS_FORCE_WAKE(offset)) \
-		__fwd = 0; \
-	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_MEDIA; \
+	if (NEEDS_FORCE_WAKE((offset))) \
+		__fwd = find_fw_domain(dev_priv, offset); \
 	__fwd; \
 })
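
With the range table in place, resolving the forcewake domain for a
register is a single binary search instead of a chain of REG_RANGE
macros. Sketching it against the vlv table above (dev_priv is picked up
implicitly by the macro):

	enum forcewake_domains fwd;

	fwd = __fwtable_reg_read_fw_domains(0x12000);
	/*
	 * 0x12000 falls in GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	 * so fwd == FORCEWAKE_MEDIA; an offset outside every range yields 0.
	 */
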
 
+/* *Must* be sorted by offset! See intel_shadow_table_check(). */
 static const i915_reg_t gen8_shadowed_regs[] = {
-	GEN6_RPNSWREQ,
-	GEN6_RC_VIDEO_FREQ,
-	RING_TAIL(RENDER_RING_BASE),
-	RING_TAIL(GEN6_BSD_RING_BASE),
-	RING_TAIL(VEBOX_RING_BASE),
-	RING_TAIL(BLT_RING_BASE),
+	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
+	GEN6_RPNSWREQ,			/* 0xA008 */
+	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
+	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
+	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
+	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
 	/* TODO: Other registers are not yet used */
 };
 
+static void intel_shadow_table_check(void)
+{
+	const i915_reg_t *reg = gen8_shadowed_regs;
+	s32 prev;
+	u32 offset;
+	unsigned int i;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+		return;
+
+	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
+		offset = i915_mmio_reg_offset(*reg);
+		WARN_ON_ONCE(prev >= (s32)offset);
+		prev = offset;
+	}
+}
+
+static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
+{
+	u32 offset = i915_mmio_reg_offset(*reg);
+
+	if (key < offset)
+		return -1;
+	else if (key > offset)
+		return 1;
+	else
+		return 0;
+}
+
 static bool is_gen8_shadowed(u32 offset)
 {
-	int i;
-	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
-		if (offset == gen8_shadowed_regs[i].reg)
-			return true;
+	const i915_reg_t *regs = gen8_shadowed_regs;
 
-	return false;
+	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
+		       mmio_reg_cmp);
 }
 
 #define __gen8_reg_write_fw_domains(offset) \
@@ -644,143 +733,70 @@ static bool is_gen8_shadowed(u32 offset)
 	__fwd; \
 })
 
-#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x2000, 0x4000) || \
-	 REG_RANGE((reg), 0x5200, 0x8000) || \
-	 REG_RANGE((reg), 0x8300, 0x8500) || \
-	 REG_RANGE((reg), 0xB000, 0xB480) || \
-	 REG_RANGE((reg), 0xE000, 0xE800))
-
-#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x8800, 0x8900) || \
-	 REG_RANGE((reg), 0xD000, 0xD800) || \
-	 REG_RANGE((reg), 0x12000, 0x14000) || \
-	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
-	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
-	 REG_RANGE((reg), 0x30000, 0x38000))
-
-#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x4000, 0x5000) || \
-	 REG_RANGE((reg), 0x8000, 0x8300) || \
-	 REG_RANGE((reg), 0x8500, 0x8600) || \
-	 REG_RANGE((reg), 0x9000, 0xB000) || \
-	 REG_RANGE((reg), 0xF000, 0x10000))
-
-#define __chv_reg_read_fw_domains(offset) \
-({ \
-	enum forcewake_domains __fwd = 0; \
-	if (!NEEDS_FORCE_WAKE(offset)) \
-		__fwd = 0; \
-	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
-	__fwd; \
-})
-
-#define __chv_reg_write_fw_domains(offset) \
-({ \
-	enum forcewake_domains __fwd = 0; \
-	if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
-		__fwd = 0; \
-	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
-	__fwd; \
-})
-
-#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
-	REG_RANGE((reg), 0xB00,  0x2000)
-
-#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x2000, 0x2700) || \
-	 REG_RANGE((reg), 0x3000, 0x4000) || \
-	 REG_RANGE((reg), 0x5200, 0x8000) || \
-	 REG_RANGE((reg), 0x8140, 0x8160) || \
-	 REG_RANGE((reg), 0x8300, 0x8500) || \
-	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
-	 REG_RANGE((reg), 0xB000, 0xB480) || \
-	 REG_RANGE((reg), 0xE000, 0xE900) || \
-	 REG_RANGE((reg), 0x24400, 0x24800))
-
-#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x8130, 0x8140) || \
-	 REG_RANGE((reg), 0x8800, 0x8A00) || \
-	 REG_RANGE((reg), 0xD000, 0xD800) || \
-	 REG_RANGE((reg), 0x12000, 0x14000) || \
-	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
-	 REG_RANGE((reg), 0x30000, 0x40000))
-
-#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
-	REG_RANGE((reg), 0x9400, 0x9800)
-
-#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
-	((reg) < 0x40000 && \
-	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
-	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
-	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
-	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
-
-#define SKL_NEEDS_FORCE_WAKE(reg) \
-	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
-#define __gen9_reg_read_fw_domains(offset) \
-({ \
-	enum forcewake_domains __fwd; \
-	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
-		__fwd = 0; \
-	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
-	else \
-		__fwd = FORCEWAKE_BLITTER; \
-	__fwd; \
-})
-
-static const i915_reg_t gen9_shadowed_regs[] = {
-	RING_TAIL(RENDER_RING_BASE),
-	RING_TAIL(GEN6_BSD_RING_BASE),
-	RING_TAIL(VEBOX_RING_BASE),
-	RING_TAIL(BLT_RING_BASE),
-	GEN6_RPNSWREQ,
-	GEN6_RC_VIDEO_FREQ,
-	/* TODO: Other registers are not yet used */
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __chv_fw_ranges[] = {
+	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
 };
 
-static bool is_gen9_shadowed(u32 offset)
-{
-	int i;
-	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
-		if (offset == gen9_shadowed_regs[i].reg)
-			return true;
-
-	return false;
-}
-
-#define __gen9_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(offset) \
 ({ \
-	enum forcewake_domains __fwd; \
-	if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
-		__fwd = 0; \
-	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
-		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
-	else \
-		__fwd = FORCEWAKE_BLITTER; \
+	enum forcewake_domains __fwd = 0; \
+	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
+		__fwd = find_fw_domain(dev_priv, offset); \
 	__fwd; \
 })
 
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen9_fw_ranges[] = {
+	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0xb480, 0xbfff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
+	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
+};
+
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
@@ -869,26 +885,30 @@ __gen2_read(64)
 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 	return val
 
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
-				     enum forcewake_domains fw_domains)
+static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+					enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *domain;
 
+	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
+		fw_domain_arm_timer(domain);
+
+	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+	dev_priv->uncore.fw_domains_active |= fw_domains;
+}
+
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+				     enum forcewake_domains fw_domains)
+{
 	if (WARN_ON(!fw_domains))
 		return;
 
-	/* Ideally GCC would be constant-fold and eliminate this loop */
-	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
-		if (domain->wake_count) {
-			fw_domains &= ~domain->mask;
-			continue;
-		}
-
-		fw_domain_arm_timer(domain);
-	}
+	/* Turn on all requested but inactive supported forcewake domains. */
+	fw_domains &= dev_priv->uncore.fw_domains;
+	fw_domains &= ~dev_priv->uncore.fw_domains_active;
 
 	if (fw_domains)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+		___force_wake_auto(dev_priv, fw_domains);
 }
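
The fast path is now two mask operations against cached state; only
domains that are supported but not currently active drop into the
noinline helper, which arms the release timers and takes the wake
reference. Sketching the arithmetic with hypothetical values:

	/* supported = RENDER|MEDIA, active = RENDER, request = RENDER|MEDIA */
	fw_domains  = FORCEWAKE_RENDER | FORCEWAKE_MEDIA;
	fw_domains &= dev_priv->uncore.fw_domains;		/* unchanged */
	fw_domains &= ~dev_priv->uncore.fw_domains_active;	/* MEDIA left */
	/* ___force_wake_auto() is entered for FORCEWAKE_MEDIA alone */
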
 
 #define __gen6_read(x) \
@@ -903,62 +923,28 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN6_READ_FOOTER; \
 }
 
-#define __vlv_read(x) \
+#define __fwtable_read(x) \
 static u##x \
-vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
-	fw_engine = __vlv_reg_read_fw_domains(offset); \
+	fw_engine = __fwtable_reg_read_fw_domains(offset); \
 	if (fw_engine) \
 		__force_wake_auto(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
 
-#define __chv_read(x) \
-static u##x \
-chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-	enum forcewake_domains fw_engine; \
-	GEN6_READ_HEADER(x); \
-	fw_engine = __chv_reg_read_fw_domains(offset); \
-	if (fw_engine) \
-		__force_wake_auto(dev_priv, fw_engine); \
-	val = __raw_i915_read##x(dev_priv, reg); \
-	GEN6_READ_FOOTER; \
-}
-
-#define __gen9_read(x) \
-static u##x \
-gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-	enum forcewake_domains fw_engine; \
-	GEN6_READ_HEADER(x); \
-	fw_engine = __gen9_reg_read_fw_domains(offset); \
-	if (fw_engine) \
-		__force_wake_auto(dev_priv, fw_engine); \
-	val = __raw_i915_read##x(dev_priv, reg); \
-	GEN6_READ_FOOTER; \
-}
-
-__gen9_read(8)
-__gen9_read(16)
-__gen9_read(32)
-__gen9_read(64)
-__chv_read(8)
-__chv_read(16)
-__chv_read(32)
-__chv_read(64)
-__vlv_read(8)
-__vlv_read(16)
-__vlv_read(32)
-__vlv_read(64)
+__fwtable_read(8)
+__fwtable_read(16)
+__fwtable_read(32)
+__fwtable_read(64)
 __gen6_read(8)
 __gen6_read(16)
 __gen6_read(32)
 __gen6_read(64)
 
-#undef __gen9_read
-#undef __chv_read
-#undef __vlv_read
+#undef __fwtable_read
 #undef __gen6_read
 #undef GEN6_READ_FOOTER
 #undef GEN6_READ_HEADER
@@ -1054,21 +1040,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 	GEN6_WRITE_FOOTER; \
 }
 
-#define __hsw_write(x) \
-static void \
-hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
-	u32 __fifo_ret = 0; \
-	GEN6_WRITE_HEADER; \
-	if (NEEDS_FORCE_WAKE(offset)) { \
-		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
-	} \
-	__raw_i915_write##x(dev_priv, reg, val); \
-	if (unlikely(__fifo_ret)) { \
-		gen6_gt_check_fifodbg(dev_priv); \
-	} \
-	GEN6_WRITE_FOOTER; \
-}
-
 #define __gen8_write(x) \
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
@@ -1081,51 +1052,30 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 	GEN6_WRITE_FOOTER; \
 }
 
-#define __chv_write(x) \
+#define __fwtable_write(x) \
 static void \
-chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
-	fw_engine = __chv_reg_write_fw_domains(offset); \
+	fw_engine = __fwtable_reg_write_fw_domains(offset); \
 	if (fw_engine) \
 		__force_wake_auto(dev_priv, fw_engine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
 
-#define __gen9_write(x) \
-static void \
-gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
-		bool trace) { \
-	enum forcewake_domains fw_engine; \
-	GEN6_WRITE_HEADER; \
-	fw_engine = __gen9_reg_write_fw_domains(offset); \
-	if (fw_engine) \
-		__force_wake_auto(dev_priv, fw_engine); \
-	__raw_i915_write##x(dev_priv, reg, val); \
-	GEN6_WRITE_FOOTER; \
-}
-
-__gen9_write(8)
-__gen9_write(16)
-__gen9_write(32)
-__chv_write(8)
-__chv_write(16)
-__chv_write(32)
+__fwtable_write(8)
+__fwtable_write(16)
+__fwtable_write(32)
 __gen8_write(8)
 __gen8_write(16)
 __gen8_write(32)
-__hsw_write(8)
-__hsw_write(16)
-__hsw_write(32)
 __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
 
-#undef __gen9_write
-#undef __chv_write
+#undef __fwtable_write
 #undef __gen8_write
-#undef __hsw_write
 #undef __gen6_write
 #undef GEN6_WRITE_FOOTER
 #undef GEN6_WRITE_HEADER
@@ -1314,6 +1264,13 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 	WARN_ON(dev_priv->uncore.fw_domains == 0);
 }
 
+#define ASSIGN_FW_DOMAINS_TABLE(d) \
+{ \
+	dev_priv->uncore.fw_domains_table = \
+			(struct intel_forcewake_range *)(d); \
+	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+}
+
 void intel_uncore_init(struct drm_i915_private *dev_priv)
 {
 	i915_check_vgpu(dev_priv);
@@ -1327,13 +1284,15 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 	switch (INTEL_INFO(dev_priv)->gen) {
 	default:
 	case 9:
-		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
-		ASSIGN_READ_MMIO_VFUNCS(gen9);
+		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
+		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+		ASSIGN_READ_MMIO_VFUNCS(fwtable);
 		break;
 	case 8:
 		if (IS_CHERRYVIEW(dev_priv)) {
-			ASSIGN_WRITE_MMIO_VFUNCS(chv);
-			ASSIGN_READ_MMIO_VFUNCS(chv);
+			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
+			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+			ASSIGN_READ_MMIO_VFUNCS(fwtable);
 
 		} else {
 			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
@@ -1342,14 +1301,11 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 		break;
 	case 7:
 	case 6:
-		if (IS_HASWELL(dev_priv)) {
-			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
-		} else {
-			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
-		}
+		ASSIGN_WRITE_MMIO_VFUNCS(gen6);
 
 		if (IS_VALLEYVIEW(dev_priv)) {
-			ASSIGN_READ_MMIO_VFUNCS(vlv);
+			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
+			ASSIGN_READ_MMIO_VFUNCS(fwtable);
 		} else {
 			ASSIGN_READ_MMIO_VFUNCS(gen6);
 		}
@@ -1366,6 +1322,10 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 		break;
 	}
 
+	intel_fw_table_check(dev_priv);
+	if (INTEL_GEN(dev_priv) >= 8)
+		intel_shadow_table_check();
+
 	if (intel_vgpu_active(dev_priv)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
 		ASSIGN_READ_MMIO_VFUNCS(vgpu);
@@ -1815,35 +1775,16 @@ static enum forcewake_domains
 intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 				i915_reg_t reg)
 {
+	u32 offset = i915_mmio_reg_offset(reg);
 	enum forcewake_domains fw_domains;
 
-	if (intel_vgpu_active(dev_priv))
-		return 0;
-
-	switch (INTEL_GEN(dev_priv)) {
-	case 9:
-		fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
-		break;
-	case 8:
-		if (IS_CHERRYVIEW(dev_priv))
-			fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
-		else
-			fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
-		break;
-	case 7:
-	case 6:
-		if (IS_VALLEYVIEW(dev_priv))
-			fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
-		else
-			fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
-		break;
-	default:
-		MISSING_CASE(INTEL_INFO(dev_priv)->gen);
-	case 5: /* forcewake was introduced with gen6 */
-	case 4:
-	case 3:
-	case 2:
-		return 0;
+	if (HAS_FWTABLE(dev_priv)) {
+		fw_domains = __fwtable_reg_read_fw_domains(offset);
+	} else if (INTEL_GEN(dev_priv) >= 6) {
+		fw_domains = __gen6_reg_read_fw_domains(offset);
+	} else {
+		WARN_ON(!IS_GEN(dev_priv, 2, 5));
+		fw_domains = 0;
 	}
 
 	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1855,32 +1796,18 @@ static enum forcewake_domains
 intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 				 i915_reg_t reg)
 {
+	u32 offset = i915_mmio_reg_offset(reg);
 	enum forcewake_domains fw_domains;
 
-	if (intel_vgpu_active(dev_priv))
-		return 0;
-
-	switch (INTEL_GEN(dev_priv)) {
-	case 9:
-		fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
-		break;
-	case 8:
-		if (IS_CHERRYVIEW(dev_priv))
-			fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
-		else
-			fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
-		break;
-	case 7:
-	case 6:
+	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+		fw_domains = __fwtable_reg_write_fw_domains(offset);
+	} else if (IS_GEN8(dev_priv)) {
+		fw_domains = __gen8_reg_write_fw_domains(offset);
+	} else if (IS_GEN(dev_priv, 6, 7)) {
 		fw_domains = FORCEWAKE_RENDER;
-		break;
-	default:
-		MISSING_CASE(INTEL_INFO(dev_priv)->gen);
-	case 5:
-	case 4:
-	case 3:
-	case 2:
-		return 0;
+	} else {
+		WARN_ON(!IS_GEN(dev_priv, 2, 5));
+		fw_domains = 0;
 	}
 
 	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1910,6 +1837,9 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
 
 	WARN_ON(!op);
 
+	if (intel_vgpu_active(dev_priv))
+		return 0;
+
 	if (op & FW_REG_READ)
 		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
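
Both direction-specific helpers feed intel_uncore_forcewake_for_reg(),
which is also where the vGPU early-return now lives, at the single
public entry point. A sketch of a caller batching accesses under one
wakeref (reg stands for any i915_reg_t of interest):

	enum forcewake_domains fw;

	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
					    FW_REG_READ | FW_REG_WRITE);
	intel_uncore_forcewake_get(dev_priv, fw);
	/* ... raw register traffic without per-access forcewake checks ... */
	intel_uncore_forcewake_put(dev_priv, fw);
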
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf83f65..296f541 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -18,6 +18,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
 #include <linux/component.h>
 #include <linux/iommu.h>
 #include <linux/of_address.h>
@@ -83,7 +84,7 @@ static void mtk_atomic_complete(struct mtk_drm_private *private,
 	drm_atomic_helper_wait_for_vblanks(drm, state);
 
 	drm_atomic_helper_cleanup_planes(drm, state);
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 }
 
 static void mtk_atomic_work(struct work_struct *work)
@@ -110,6 +111,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
 
 	drm_atomic_helper_swap_state(state, true);
 
+	drm_atomic_state_get(state);
 	if (async)
 		mtk_atomic_schedule(private, state);
 	else
@@ -415,7 +417,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
 		    comp_type == MTK_DPI) {
 			dev_info(dev, "Adding component match for %s\n",
 				 node->full_name);
-			component_match_add(dev, &match, compare_of, node);
+			drm_of_component_match_add(dev, &match, compare_of,
+						   node);
 		} else {
 			struct mtk_ddp_comp *comp;
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 919b35f..83272b4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
 	.ttm_tt_populate = mgag200_ttm_tt_populate,
 	.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
 	.init_mem_type = mgag200_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = mgag200_bo_evict_flags,
 	.move = NULL,
 	.verify_access = mgag200_bo_verify_access,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 5127b75..7250ffc 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -25,9 +25,6 @@ bool hang_debug = false;
 MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
 module_param_named(hang_debug, hang_debug, bool, 0600);
 
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-
 static const struct adreno_info gpulist[] = {
 	{
 		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a54f6e0..07d99bd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -311,4 +311,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
 		gpu_write(&gpu->base, reg - 1, data);
 }
 
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+
 #endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 951c002..cf50d3ec 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -75,15 +75,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev,
 		!(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
 		return;
 
-	if (!dev->mode_config.rotation_property)
-		dev->mode_config.rotation_property =
-			drm_mode_create_rotation_property(dev,
-				DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
-
-	if (dev->mode_config.rotation_property)
-		drm_object_attach_property(&plane->base,
-			dev->mode_config.rotation_property,
-			DRM_ROTATE_0);
+	drm_plane_create_rotation_property(plane,
+					   DRM_ROTATE_0,
+					   DRM_ROTATE_0 |
+					   DRM_ROTATE_180 |
+					   DRM_REFLECT_X |
+					   DRM_REFLECT_Y);
 }
 
 /* helper to install properties which are common to planes and crtcs */
@@ -289,6 +286,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 			plane_enabled(old_state), plane_enabled(state));
 
 	if (plane_enabled(state)) {
+		unsigned int rotation;
+
 		format = to_mdp_format(msm_framebuffer_format(state->fb));
 		if (MDP_FORMAT_IS_YUV(format) &&
 			!pipe_supports_yuv(mdp5_plane->caps)) {
@@ -309,8 +308,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 			return -EINVAL;
 		}
 
-		hflip = !!(state->rotation & DRM_REFLECT_X);
-		vflip = !!(state->rotation & DRM_REFLECT_Y);
+		rotation = drm_rotation_simplify(state->rotation,
+						 DRM_ROTATE_0 |
+						 DRM_REFLECT_X |
+						 DRM_REFLECT_Y);
+		hflip = !!(rotation & DRM_REFLECT_X);
+		vflip = !!(rotation & DRM_REFLECT_Y);
+
 		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
 			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
 			dev_err(plane->dev->dev,
@@ -681,6 +685,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
 	uint32_t hdecm = 0, vdecm = 0;
 	uint32_t pix_format;
+	unsigned int rotation;
 	bool vflip, hflip;
 	unsigned long flags;
 	int ret;
@@ -743,8 +748,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	config |= get_scale_config(format, src_h, crtc_h, false);
 	DBG("scale config = %x", config);
 
-	hflip = !!(pstate->rotation & DRM_REFLECT_X);
-	vflip = !!(pstate->rotation & DRM_REFLECT_Y);
+	rotation = drm_rotation_simplify(pstate->rotation,
+					 DRM_ROTATE_0 |
+					 DRM_REFLECT_X |
+					 DRM_REFLECT_Y);
+	hflip = !!(rotation & DRM_REFLECT_X);
+	vflip = !!(rotation & DRM_REFLECT_Y);
 
 	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 73bae38..db193f8 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -141,7 +141,7 @@ static void complete_commit(struct msm_commit *c, bool async)
 
 	kms->funcs->complete_commit(kms, state);
 
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 
 	commit_destroy(c);
 }
@@ -256,6 +256,7 @@ int msm_atomic_commit(struct drm_device *dev,
 	 * current layout.
 	 */
 
+	drm_atomic_state_get(state);
 	if (nonblock) {
 		queue_work(priv->atomic_wq, &c->work);
 		return 0;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 663f2b6..3c85373 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -18,6 +18,7 @@
 #ifdef CONFIG_DEBUG_FS
 #include "msm_drv.h"
 #include "msm_gpu.h"
+#include "msm_debugfs.h"
 
 static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
 {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index fb5c0b0..84d38ea 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -15,6 +15,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <drm/drm_of.h>
+
 #include "msm_drv.h"
 #include "msm_debugfs.h"
 #include "msm_fence.h"
@@ -919,8 +921,8 @@ static int add_components_mdp(struct device *mdp_dev,
 			continue;
 		}
 
-		component_match_add(master_dev, matchptr, compare_of, intf);
-
+		drm_of_component_match_add(master_dev, matchptr, compare_of,
+					   intf);
 		of_node_put(intf);
 		of_node_put(ep_node);
 	}
@@ -962,8 +964,8 @@ static int add_display_components(struct device *dev,
 		put_device(mdp_dev);
 
 		/* add the MDP component itself */
-		component_match_add(dev, matchptr, compare_of,
-				    mdp_dev->of_node);
+		drm_of_component_match_add(dev, matchptr, compare_of,
+					   mdp_dev->of_node);
 	} else {
 		/* MDP4 */
 		mdp_dev = dev;
@@ -996,7 +998,7 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	component_match_add(dev, matchptr, compare_of, np);
+	drm_of_component_match_add(dev, matchptr, compare_of, np);
 
 	of_node_put(np);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d0da52f..940bf49 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool exclusive, struct fence *fence);
+		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a9b9b1c..3f299c5 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,7 +15,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "msm_drv.h"
 #include "msm_fence.h"
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
 
 	fctx->dev = dev;
 	fctx->name = name;
-	fctx->context = fence_context_alloc(1);
+	fctx->context = dma_fence_context_alloc(1);
 	init_waitqueue_head(&fctx->event);
 	spin_lock_init(&fctx->spinlock);
 
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 
 struct msm_fence {
 	struct msm_fence_context *fctx;
-	struct fence base;
+	struct dma_fence base;
 };
 
-static inline struct msm_fence *to_msm_fence(struct fence *fence)
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct msm_fence, base);
 }
 
-static const char *msm_fence_get_driver_name(struct fence *fence)
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "msm";
 }
 
-static const char *msm_fence_get_timeline_name(struct fence *fence)
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	return f->fctx->name;
 }
 
-static bool msm_fence_enable_signaling(struct fence *fence)
+static bool msm_fence_enable_signaling(struct dma_fence *fence)
 {
 	return true;
 }
 
-static bool msm_fence_signaled(struct fence *fence)
+static bool msm_fence_signaled(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct fence *fence)
+static void msm_fence_release(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	kfree_rcu(f, base.rcu);
 }
 
-static const struct fence_ops msm_fence_ops = {
+static const struct dma_fence_ops msm_fence_ops = {
 	.get_driver_name = msm_fence_get_driver_name,
 	.get_timeline_name = msm_fence_get_timeline_name,
 	.enable_signaling = msm_fence_enable_signaling,
 	.signaled = msm_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = msm_fence_release,
 };
 
-struct fence *
+struct dma_fence *
 msm_fence_alloc(struct msm_fence_context *fctx)
 {
 	struct msm_fence *f;
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx)
 
 	f->fctx = fctx;
 
-	fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
-			fctx->context, ++fctx->last_fence);
+	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+		       fctx->context, ++fctx->last_fence);
 
 	return &f->base;
 }
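
Beyond the rename of struct fence to struct dma_fence, every call site
changes mechanically: fence_*() becomes dma_fence_*() with unchanged
semantics. A producer-side sketch under the new names (obj is a
hypothetical msm_gem_object; error handling abbreviated):

	struct dma_fence *fence = msm_fence_alloc(fctx);

	if (IS_ERR(fence))
		return PTR_ERR(fence);
	/* the reservation object takes its own reference ... */
	reservation_object_add_excl_fence(obj->resv, fence);
	/* ... so we can drop ours once we no longer need the pointer */
	dma_fence_put(fence);
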
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index ceb5b3d..56061aa 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx,
 		struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
 
-struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
 
 #endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e..57db7db 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i, ret;
 
 	if (!exclusive) {
@@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 		fence = reservation_object_get_excl(msm_obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 		fence = rcu_dereference_protected(fobj->shared[i],
 						reservation_object_held(msm_obj->resv));
 		if (fence->context != fctx->context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
 		struct seq_file *m)
 {
-	if (!fence_is_signaled(fence))
+	if (!dma_fence_is_signaled(fence))
 		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
 				fence->ops->get_driver_name(fence),
 				fence->ops->get_timeline_name(fence),
@@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object *robj = msm_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b2f13cf..2cb8551 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -104,7 +104,7 @@ struct msm_gem_submit {
 	struct list_head node;   /* node in gpu submit_list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct pid *pid;    /* submitting process */
 	bool valid;         /* true if no cmdstream patching needed */
 	unsigned int nr_cmds;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37..25e8786 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
 void msm_gem_submit_free(struct msm_gem_submit *submit)
 {
-	fence_put(submit->fence);
+	dma_fence_put(submit->fence);
 	list_del(&submit->node);
 	put_pid(submit->pid);
 	kfree(submit);
@@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct msm_file_private *ctx = file->driver_priv;
 	struct msm_gem_submit *submit;
 	struct msm_gpu *gpu = priv->gpu;
-	struct fence *in_fence = NULL;
+	struct dma_fence *in_fence = NULL;
 	struct sync_file *sync_file = NULL;
 	int out_fence_fd = -1;
 	unsigned i;
@@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		 */
 
 		if (in_fence->context != gpu->fctx->context) {
-			ret = fence_wait(in_fence, true);
+			ret = dma_fence_wait(in_fence, true);
 			if (ret)
 				goto out;
 		}
@@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
 out:
 	if (in_fence)
-		fence_put(in_fence);
+		dma_fence_put(in_fence);
 	submit_cleanup(submit);
 	if (ret)
 		msm_gem_submit_free(submit);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb0983..3249707 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -476,7 +476,7 @@ static void retire_submits(struct msm_gpu *gpu)
 		submit = list_first_entry(&gpu->submit_list,
 				struct msm_gem_submit, node);
 
-		if (fence_is_signaled(submit->fence)) {
+		if (dma_fence_is_signaled(submit->fence)) {
 			retire_submit(gpu, submit);
 		} else {
 			break;
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2527bf4..fde6e36 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -22,6 +22,7 @@
 nouveau-y += nouveau_drm.o
 nouveau-y += nouveau_hwmon.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
 nouveau-y += nouveau_nvif.o
 nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
 nouveau-y += nouveau_usif.o # userspace <-> nvif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index a47d46dd..b7a54e6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -6,6 +6,7 @@ enum dcb_gpio_func_name {
 	DCB_GPIO_TVDAC1 = 0x2d,
 	DCB_GPIO_FAN = 0x09,
 	DCB_GPIO_FAN_SENSE = 0x3d,
+	DCB_GPIO_LOGO_LED_PWM = 0x84,
 	DCB_GPIO_UNUSED = 0xff,
 	DCB_GPIO_VID0 = 0x04,
 	DCB_GPIO_VID1 = 0x05,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index 9cb9747..e933d3e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,10 +1,16 @@
 #ifndef __NVBIOS_ICCSENSE_H__
 #define __NVBIOS_ICCSENSE_H__
+struct pwr_rail_resistor_t {
+	u8 mohm;
+	bool enabled;
+};
+
 struct pwr_rail_t {
 	u8 mode;
 	u8 extdev_id;
-	u8 resistor_mohm;
-	u8 rail;
+	u8 resistor_count;
+	struct pwr_rail_resistor_t resistors[3];
+	u16 config;
 };
 
 struct nvbios_iccsense {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index 6633c6d..8fa1294 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,6 +1,9 @@
 #ifndef __NVBIOS_VMAP_H__
 #define __NVBIOS_VMAP_H__
 struct nvbios_vmap {
+	u8  max0;
+	u8  max1;
+	u8  max2;
 };
 
 u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
@@ -8,7 +11,7 @@ u16 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 		      struct nvbios_vmap *);
 
 struct nvbios_vmap_entry {
-	u8  unk0;
+	u8  mode;
 	u8  link;
 	u32 min;
 	u32 max;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index b0df610..23f3d1b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -13,8 +13,9 @@ struct nvbios_volt {
 	u32 base;
 
 	/* GPIO mode */
-	u8  vidmask;
-	s16 step;
+	bool ranged;
+	u8   vidmask;
+	s16  step;
 
 	/* PWM mode */
 	u32 pwm_freq;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
new file mode 100644
index 0000000..87f804f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -0,0 +1,24 @@
+#ifndef __NVBIOS_VPSTATE_H__
+#define __NVBIOS_VPSTATE_H__
+struct nvbios_vpstate_header {
+	u32 offset;
+
+	u8 version;
+	u8 hlen;
+	u8 ecount;
+	u8 elen;
+	u8 scount;
+	u8 slen;
+
+	u8 base_id;
+	u8 boost_id;
+	u8 tdp_id;
+};
+struct nvbios_vpstate_entry {
+	u8  pstate;
+	u16 clock_mhz;
+};
+int nvbios_vpstate_parse(struct nvkm_bios *, struct nvbios_vpstate_header *);
+int nvbios_vpstate_entry(struct nvkm_bios *, struct nvbios_vpstate_header *,
+			 u8 idx, struct nvbios_vpstate_entry *);
+#endif
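
This new table hands the clock code the BIOS base/boost/TDP entry ids
that the NVKM_CLK_BOOST_* modes later select between. A sketch of
pulling out the base clock, assuming the usual 0-on-success return
convention and that bios and clk are in scope:

	struct nvbios_vpstate_header h;
	struct nvbios_vpstate_entry base;

	if (!nvbios_vpstate_parse(bios, &h) &&
	    !nvbios_vpstate_entry(bios, &h, h.base_id, &base))
		clk->base_khz = base.clock_mhz * 1000;	/* table stores MHz */
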
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index fb54417..e5275f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -6,6 +6,10 @@
 struct nvbios_pll;
 struct nvkm_pll_vals;
 
+#define NVKM_CLK_CSTATE_DEFAULT -1 /* POSTed default */
+#define NVKM_CLK_CSTATE_BASE    -2 /* pstate base */
+#define NVKM_CLK_CSTATE_HIGHEST -3 /* highest possible */
+
 enum nv_clk_src {
 	nv_clk_src_crystal,
 	nv_clk_src_href,
@@ -52,6 +56,7 @@ struct nvkm_cstate {
 	struct list_head head;
 	u8  voltage;
 	u32 domain[nv_clk_src_max];
+	u8  id;
 };
 
 struct nvkm_pstate {
@@ -67,7 +72,8 @@ struct nvkm_pstate {
 struct nvkm_domain {
 	enum nv_clk_src name;
 	u8 bios; /* 0xff for none */
-#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_CORE    0x01
+#define NVKM_CLK_DOM_FLAG_VPSTATE 0x02
 	u8 flags;
 	const char *mname;
 	int mdiv;
@@ -93,10 +99,16 @@ struct nvkm_clk {
 	int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
 	int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
 	int astate; /* perfmon adjustment (base) */
-	int tstate; /* thermal adjustment (max-) */
 	int dstate; /* display adjustment (min+) */
+	u8  temp;
 
 	bool allow_reclock;
+#define NVKM_CLK_BOOST_NONE 0x0
+#define NVKM_CLK_BOOST_BIOS 0x1
+#define NVKM_CLK_BOOST_FULL 0x2
+	u8  boost_mode;
+	u32 base_khz;
+	u32 boost_khz;
 
 	/*XXX: die, these are here *only* to support the completely
 	 *     bat-shit insane what-was-nouveau_hw.c code
@@ -110,7 +122,7 @@ int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
 int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
 int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
 int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
-int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
 
 int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
 int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index b765f4f..08ef998 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -15,12 +15,28 @@ struct nvkm_volt {
 
 	u32 max_uv;
 	u32 min_uv;
+
+	/*
+	 * These are fully functional map entries creating a sw ceiling for
+	 * the voltage. These all can describe different kind of curves, so
+	 * that for any given temperature a different one can return the lowest
+	 * value of all three.
+	 */
+	u8 max0_id;
+	u8 max1_id;
+	u8 max2_id;
+
+	int speedo;
 };
 
+int nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temperature);
+int nvkm_volt_map_min(struct nvkm_volt *volt, u8 id);
 int nvkm_volt_get(struct nvkm_volt *);
-int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
+		     int condition);
 
 int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f5101be..5e2c568 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -232,6 +232,7 @@ nouveau_backlight_init(struct drm_device *dev)
 		case NV_DEVICE_INFO_V0_TESLA:
 		case NV_DEVICE_INFO_V0_FERMI:
 		case NV_DEVICE_INFO_V0_KEPLER:
+		case NV_DEVICE_INFO_V0_MAXWELL:
 			return nv50_backlight_init(connector);
 		default:
 			break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 0067586..18eb061 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -31,10 +31,8 @@
 
 #define DCB_LOC_ON_CHIP 0
 
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
-#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
-#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
 #define ROMPTR(d,x) ({            \
 	struct nouveau_drm *drm = nouveau_drm((d)); \
 	ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
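
The old cast-and-dereference ROM16/ROM32 invoke undefined behaviour for
the unaligned offsets that VBIOS tables routinely contain, and can
fault outright on strict-alignment architectures; get_unaligned_le16/32
compile down to byte-wise loads where the CPU requires them.
Illustratively:

	#include <asm/unaligned.h>

	u8 vbios[] = { 0x00, 0x34, 0x12 };	/* LE word at an odd offset */
	u16 v = get_unaligned_le16(&vbios[1]);	/* v == 0x1234, no trap */
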
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 343b865..e0c0007 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
 
 static void
 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
-			struct fence *fence)
+			struct dma_fence *fence)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		tile->fence = (struct nouveau_fence *)fence_get(fence);
+		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
@@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct fence *fence = reservation_object_get_excl(bo->resv);
+	struct dma_fence *fence = reservation_object_get_excl(bo->resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
@@ -1561,6 +1561,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = nouveau_bo_evict_flags,
 	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88..6adf947 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -47,6 +47,7 @@
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
 #include "nouveau_vga.h"
+#include "nouveau_led.h"
 #include "nouveau_hwmon.h"
 #include "nouveau_acpi.h"
 #include "nouveau_bios.h"
@@ -475,6 +476,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	nouveau_hwmon_init(dev);
 	nouveau_accel_init(drm);
 	nouveau_fbcon_init(dev);
+	nouveau_led_init(dev);
 
 	if (nouveau_runtime_pm != 0) {
 		pm_runtime_use_autosuspend(dev->dev);
@@ -510,6 +512,7 @@ nouveau_drm_unload(struct drm_device *dev)
 		pm_runtime_forbid(dev->dev);
 	}
 
+	nouveau_led_fini(dev);
 	nouveau_fbcon_fini(dev);
 	nouveau_accel_fini(drm);
 	nouveau_hwmon_fini(dev);
@@ -561,6 +564,8 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
 	struct nouveau_cli *cli;
 	int ret;
 
+	nouveau_led_suspend(dev);
+
 	if (dev->mode_config.num_crtc) {
 		NV_INFO(drm, "suspending console...\n");
 		nouveau_fbcon_set_suspend(dev, 1);
@@ -649,6 +654,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
 		nouveau_fbcon_set_suspend(dev, 0);
 	}
 
+	nouveau_led_resume(dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a021..c0e2b32 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -166,6 +166,9 @@ struct nouveau_drm {
 	struct nouveau_hwmon *hwmon;
 	struct nouveau_debugfs *debugfs;
 
+	/* led management */
+	struct nouveau_led *led;
+
 	/* display power reference */
 	bool have_disp_power_ref;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4bb9ab8..e9529ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,7 +28,7 @@
 
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
 
 #include <nvif/cl826e.h>
 #include <nvif/notify.h>
@@ -38,11 +38,11 @@
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
-static const struct fence_ops nouveau_fence_ops_uevent;
-static const struct fence_ops nouveau_fence_ops_legacy;
+static const struct dma_fence_ops nouveau_fence_ops_uevent;
+static const struct dma_fence_ops nouveau_fence_ops_legacy;
 
 static inline struct nouveau_fence *
-from_fence(struct fence *fence)
+from_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct nouveau_fence, base);
 }
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence)
 {
 	int drop = 0;
 
-	fence_signal_locked(&fence->base);
+	dma_fence_signal_locked(&fence->base);
 	list_del(&fence->head);
 	rcu_assign_pointer(fence->channel, NULL);
 
-	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
 		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
 		if (!--fctx->notify_ref)
 			drop = 1;
 	}
 
-	fence_put(&fence->base);
+	dma_fence_put(&fence->base);
 	return drop;
 }
 
 static struct nouveau_fence *
-nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
 	struct nouveau_fence_priv *priv = (void*)drm->fence;
 
 	if (fence->ops != &nouveau_fence_ops_legacy &&
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 
 struct nouveau_fence_work {
 	struct work_struct work;
-	struct fence_cb cb;
+	struct dma_fence_cb cb;
 	void (*func)(void *);
 	void *data;
 };
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork)
 	kfree(work);
 }
 
-static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
 
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
 }
 
 void
-nouveau_fence_work(struct fence *fence,
+nouveau_fence_work(struct dma_fence *fence,
 		   void (*func)(void *), void *data)
 {
 	struct nouveau_fence_work *work;
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		goto err;
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence,
 	work->func = func;
 	work->data = data;
 
-	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+	if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
 		goto err_free;
 	return;
 
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 	fence->timeout  = jiffies + (15 * HZ);
 
 	if (priv->uevent)
-		fence_init(&fence->base, &nouveau_fence_ops_uevent,
-			   &fctx->lock, fctx->context, ++fctx->sequence);
+		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
+			       &fctx->lock, fctx->context, ++fctx->sequence);
 	else
-		fence_init(&fence->base, &nouveau_fence_ops_legacy,
-			   &fctx->lock, fctx->context, ++fctx->sequence);
+		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
+			       &fctx->lock, fctx->context, ++fctx->sequence);
 	kref_get(&fctx->fence_ref);
 
-	trace_fence_emit(&fence->base);
+	trace_dma_fence_emit(&fence->base);
 	ret = fctx->emit(fence);
 	if (!ret) {
-		fence_get(&fence->base);
+		dma_fence_get(&fence->base);
 		spin_lock_irq(&fctx->lock);
 
 		if (nouveau_fence_update(chan, fctx))
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
 		struct nouveau_channel *chan;
 		unsigned long flags;
 
-		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 			return true;
 
 		spin_lock_irqsave(&fctx->lock, flags);
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
 			nvif_notify_put(&fctx->notify);
 		spin_unlock_irqrestore(&fctx->lock, flags);
 	}
-	return fence_is_signaled(&fence->base);
+	return dma_fence_is_signaled(&fence->base);
 }
 
 static long
-nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
+nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 	if (!lazy)
 		return nouveau_fence_wait_busy(fence, intr);
 
-	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
 	if (ret < 0)
 		return ret;
 	else if (!ret)
@@ -391,7 +391,7 @@ int
 nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
 	struct nouveau_fence *f;
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		}
 
 		if (must_wait)
-			ret = fence_wait(fence, intr);
+			ret = dma_fence_wait(fence, intr);
 
 		return ret;
 	}
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		}
 
 		if (must_wait)
-			ret = fence_wait(fence, intr);
+			ret = dma_fence_wait(fence, intr);
 	}
 
 	return ret;
@@ -456,7 +456,7 @@ void
 nouveau_fence_unref(struct nouveau_fence **pfence)
 {
 	if (*pfence)
-		fence_put(&(*pfence)->base);
+		dma_fence_put(&(*pfence)->base);
 	*pfence = NULL;
 }
 
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	return ret;
 }
 
-static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
 {
 	return "nouveau";
 }
 
-static const char *nouveau_fence_get_timeline_name(struct fence *f)
+static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
  * result. The drm node should still be there, so we can derive the index from
  * the fence context.
  */
-static bool nouveau_fence_is_signaled(struct fence *f)
+static bool nouveau_fence_is_signaled(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 	return ret;
 }
 
-static bool nouveau_fence_no_signaling(struct fence *f)
+static bool nouveau_fence_no_signaling(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f)
 	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
 
 	/*
-	 * This needs uevents to work correctly, but fence_add_callback relies on
+	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
 	 * being able to enable signaling. It will still get signaled eventually,
 	 * just not right away.
 	 */
 	if (nouveau_fence_is_signaled(f)) {
 		list_del(&fence->head);
 
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 		return false;
 	}
 
 	return true;
 }
 
-static void nouveau_fence_release(struct fence *f)
+static void nouveau_fence_release(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
 	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
-	fence_free(&fence->base);
+	dma_fence_free(&fence->base);
 }
 
-static const struct fence_ops nouveau_fence_ops_legacy = {
+static const struct dma_fence_ops nouveau_fence_ops_legacy = {
 	.get_driver_name = nouveau_fence_get_get_driver_name,
 	.get_timeline_name = nouveau_fence_get_timeline_name,
 	.enable_signaling = nouveau_fence_no_signaling,
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = {
 	.release = nouveau_fence_release
 };
 
-static bool nouveau_fence_enable_signaling(struct fence *f)
+static bool nouveau_fence_enable_signaling(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f)
 
 	ret = nouveau_fence_no_signaling(f);
 	if (ret)
-		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
 	else if (!--fctx->notify_ref)
 		nvif_notify_put(&fctx->notify);
 
 	return ret;
 }
 
-static const struct fence_ops nouveau_fence_ops_uevent = {
+static const struct dma_fence_ops nouveau_fence_ops_uevent = {
 	.get_driver_name = nouveau_fence_get_get_driver_name,
 	.get_timeline_name = nouveau_fence_get_timeline_name,
 	.enable_signaling = nouveau_fence_enable_signaling,
 	.signaled = nouveau_fence_is_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = NULL
 };
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7..41f3c01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,14 +1,14 @@
 #ifndef __NOUVEAU_FENCE_H__
 #define __NOUVEAU_FENCE_H__
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <nvif/notify.h>
 
 struct nouveau_drm;
 struct nouveau_bo;
 
 struct nouveau_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	struct list_head head;
 
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int  nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 72e2399..7f083c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 
 	fobj = reservation_object_get_list(resv);
 
@@ -861,6 +861,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	struct nouveau_bo *nvbo;
 	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
 	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
+	long lret;
 	int ret;
 
 	gem = drm_gem_object_lookup(file_priv, req->handle);
@@ -868,19 +869,15 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	if (no_wait)
-		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
-	else {
-		long lret;
+	lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
+						   no_wait ? 0 : 30 * HZ);
+	if (!lret)
+		ret = -EBUSY;
+	else if (lret > 0)
+		ret = 0;
+	else
+		ret = lret;
 
-		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
-		if (!lret)
-			ret = -EBUSY;
-		else if (lret > 0)
-			ret = 0;
-		else
-			ret = lret;
-	}
 	nouveau_bo_sync_for_cpu(nvbo);
 	drm_gem_object_unreference_unlocked(gem);
 
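The folded branch above relies on reservation_object_wait_timeout_rcu() treating a zero timeout as a non-blocking poll, so the old no_wait special case becomes redundant. A minimal sketch of the return-value mapping (helper name hypothetical):

	/* <0: error, 0: still busy / timed out, >0: remaining jiffies */
	static int map_wait_result(long lret)
	{
		if (!lret)
			return -EBUSY;		/* not signaled in time */
		else if (lret > 0)
			return 0;		/* all fences signaled */
		else
			return lret;		/* e.g. -ERESTARTSYS */
	}
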
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
new file mode 100644
index 0000000..3e2f1b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Martin Peres <martin.peres@free.fr>
+ */
+
+#include <linux/leds.h>
+
+#include "nouveau_led.h"
+#include <nvkm/subdev/gpio.h>
+
+static enum led_brightness
+nouveau_led_get_brightness(struct led_classdev *led)
+{
+	struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+	struct nouveau_drm *drm = nouveau_drm(drm_dev);
+	struct nvif_object *device = &drm->device.object;
+	u32 div, duty;
+
+	div =  nvif_rd32(device, 0x61c880) & 0x00ffffff;
+	duty = nvif_rd32(device, 0x61c884) & 0x00ffffff;
+
+	if (div > 0)
+		return duty * LED_FULL / div;
+	else
+		return 0;
+}
+
+static void
+nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
+{
+	struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+	struct nouveau_drm *drm = nouveau_drm(drm_dev);
+	struct nvif_object *device = &drm->device.object;
+
+	u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
+	u32 freq = 100; /* this is what nvidia uses and it should be good enough */
+	u32 div, duty;
+
+	div = input_clk / freq;
+	duty = value * div / LED_FULL;
+
+	/* for now, it is safe to poke these registers directly because:
+	 *  - A: nvidia never drives the logo LED from any PWM controller
+	 *       other than PDISPLAY.SOR[1].PWM.
+	 *  - B: nouveau does not touch these registers anywhere else
+	 */
+	nvif_wr32(device, 0x61c880, div);
+	nvif_wr32(device, 0x61c884, 0xc0000000 | duty);
+}
+
+
+int
+nouveau_led_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+	struct dcb_gpio_func logo_led;
+	int ret;
+
+	if (!gpio)
+		return 0;
+
+	/* check that there is a GPIO controlling the logo LED */
+	if (nvkm_gpio_find(gpio, 0, DCB_GPIO_LOGO_LED_PWM, 0xff, &logo_led))
+		return 0;
+
+	drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL);
+	if (!drm->led)
+		return -ENOMEM;
+	drm->led->dev = dev;
+
+	drm->led->led.name = "nvidia-logo";
+	drm->led->led.max_brightness = 255;
+	drm->led->led.brightness_get = nouveau_led_get_brightness;
+	drm->led->led.brightness_set = nouveau_led_set_brightness;
+
+	ret = led_classdev_register(dev->dev, &drm->led->led);
+	if (ret) {
+		kfree(drm->led);
+		return ret;
+	}
+
+	return 0;
+}
+
+void
+nouveau_led_suspend(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (drm->led)
+		led_classdev_suspend(&drm->led->led);
+}
+
+void
+nouveau_led_resume(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (drm->led)
+		led_classdev_resume(&drm->led->led);
+}
+
+void
+nouveau_led_fini(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (drm->led) {
+		led_classdev_unregister(&drm->led->led);
+		kfree(drm->led);
+		drm->led = NULL;
+	}
+}
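A small standalone sketch of the PWM arithmetic used by the brightness callbacks above: the divider is derived from the 27 MHz crystal and the 100 Hz PWM frequency, and brightness maps linearly onto the duty field. Constant names here are illustrative; LED_FULL is 255 in <linux/leds.h>.

	enum { CRYSTAL_HZ = 27000000, PWM_HZ = 100, FULL = 255 /* LED_FULL */ };

	/* brightness (0..FULL) -> duty ticks (0..div) */
	static unsigned int brightness_to_duty(unsigned int brightness)
	{
		unsigned int div = CRYSTAL_HZ / PWM_HZ;	/* 270000 ticks */

		return brightness * div / FULL;
	}

	/* duty/div ratio -> brightness, guarding against div == 0 */
	static unsigned int duty_to_brightness(unsigned int duty, unsigned int div)
	{
		return div ? duty * FULL / div : 0;
	}
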
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
new file mode 100644
index 0000000..187ecdb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@free.fr>
+ */
+
+#ifndef __NOUVEAU_LED_H__
+#define __NOUVEAU_LED_H__
+
+#include "nouveau_drv.h"
+
+struct led_classdev;
+
+struct nouveau_led {
+	struct drm_device *dev;
+
+	struct led_classdev led;
+};
+
+static inline struct nouveau_led *
+nouveau_led(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->led;
+}
+
+/* nouveau_led.c */
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
+int  nouveau_led_init(struct drm_device *dev);
+void nouveau_led_suspend(struct drm_device *dev);
+void nouveau_led_resume(struct drm_device *dev);
+void nouveau_led_fini(struct drm_device *dev);
+#else
+static inline int  nouveau_led_init(struct drm_device *dev) { return 0; }
+static inline void nouveau_led_suspend(struct drm_device *dev) { }
+static inline void nouveau_led_resume(struct drm_device *dev) { }
+static inline void nouveau_led_fini(struct drm_device *dev) { }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1915b7b..fa8f237 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv04_fence_context_new;
 	priv->base.context_del = nv04_fence_context_del;
 	priv->base.contexts = 15;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4e3de34..f99fcf5 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv10_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 31;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 7d5e562..79bc011 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv17_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 31;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 4d6f202..8c529541 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv50_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 127;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d..23ef04b 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -229,7 +229,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_del = nv84_fence_context_del;
 
 	priv->base.contexts = fifo->nr;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 34ecd4a..058ff46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -20,6 +20,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include <core/device.h>
+#include <core/firmware.h>
 
 /**
  * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7218a06..53d1717 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1357,7 +1357,7 @@ nvc0_chipset = {
 	.pmu = gf100_pmu_new,
 	.therm = gt215_therm_new,
 	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.ce[1] = gf100_ce_new,
 	.disp = gt215_disp_new,
@@ -1394,7 +1394,7 @@ nvc1_chipset = {
 	.pmu = gf100_pmu_new,
 	.therm = gt215_therm_new,
 	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.disp = gt215_disp_new,
 	.dma = gf100_dma_new,
@@ -1430,7 +1430,7 @@ nvc3_chipset = {
 	.pmu = gf100_pmu_new,
 	.therm = gt215_therm_new,
 	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.disp = gt215_disp_new,
 	.dma = gf100_dma_new,
@@ -1466,7 +1466,7 @@ nvc4_chipset = {
 	.pmu = gf100_pmu_new,
 	.therm = gt215_therm_new,
 	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.ce[1] = gf100_ce_new,
 	.disp = gt215_disp_new,
@@ -1503,7 +1503,7 @@ nvc8_chipset = {
 	.pmu = gf100_pmu_new,
 	.therm = gt215_therm_new,
 	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.ce[1] = gf100_ce_new,
 	.disp = gt215_disp_new,
@@ -1540,7 +1540,7 @@ nvce_chipset = {
 	.pmu = gf100_pmu_new,
 	.therm = gt215_therm_new,
 	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.ce[1] = gf100_ce_new,
 	.disp = gt215_disp_new,
@@ -1577,7 +1577,7 @@ nvcf_chipset = {
 	.pmu = gf100_pmu_new,
 	.therm = gt215_therm_new,
 	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.disp = gt215_disp_new,
 	.dma = gf100_dma_new,
@@ -1612,6 +1612,7 @@ nvd7_chipset = {
 	.pci = gf106_pci_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.disp = gf119_disp_new,
 	.dma = gf119_dma_new,
@@ -1647,7 +1648,7 @@ nvd9_chipset = {
 	.pmu = gf119_pmu_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
-	.volt = nv40_volt_new,
+	.volt = gf100_volt_new,
 	.ce[0] = gf100_ce_new,
 	.disp = gf119_disp_new,
 	.dma = gf119_dma_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad030..0030cd9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1665,14 +1665,31 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
 	*pdevice = &pdev->device;
 	pdev->pdev = pci_dev;
 
-	return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
-				pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
-				pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
-				NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
-				(u64)pci_domain_nr(pci_dev->bus) << 32 |
-				     pci_dev->bus->number << 16 |
-				     PCI_SLOT(pci_dev->devfn) << 8 |
-				     PCI_FUNC(pci_dev->devfn), name,
-				cfg, dbg, detect, mmio, subdev_mask,
-				&pdev->device);
+	ret = nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+			       pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+			       pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+			       NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+			       (u64)pci_domain_nr(pci_dev->bus) << 32 |
+				    pci_dev->bus->number << 16 |
+				    PCI_SLOT(pci_dev->devfn) << 8 |
+				    PCI_FUNC(pci_dev->devfn), name,
+			       cfg, dbg, detect, mmio, subdev_mask,
+			       &pdev->device);
+
+	if (ret)
+		return ret;
+
+	/*
+	 * Set a preliminary DMA mask based on the .dma_bits member of the
+	 * MMU subdevice. This allows other subdevices to create DMA mappings
+	 * in their init() or oneinit() methods, which may be called before the
+	 * TTM layer sets the DMA mask definitively.
+	 * This is necessary for platforms where the default 32-bit DMA mask
+	 * does not cover any system memory, i.e., when all RAM lies above 4 GB.
+	 */
+	if (subdev_mask & BIT(NVKM_SUBDEV_MMU))
+		dma_set_mask_and_coherent(&pci_dev->dev,
+				DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+
+	return 0;
 }
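As a hedged illustration of the comment above: the preliminary mask simply widens the device's addressing before TTM sets the final mask. dma_set_mask_and_coherent() and DMA_BIT_MASK() are the standard <linux/dma-mapping.h> helpers; the 40-bit width is an assumed example, not taken from the patch.

	#include <linux/dma-mapping.h>

	/* e.g. dma_bits = 40 -> DMA_BIT_MASK(40) = 0xff_ffff_ffff */
	static int set_preliminary_dma_mask(struct device *dev, u8 dma_bits)
	{
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_bits));
	}
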
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 1bb9d66..4510cb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -45,14 +45,6 @@ static const struct nvkm_output_func
 g94_sor_output_func = {
 };
 
-int
-g94_sor_output_new(struct nvkm_disp *disp, int index,
-		   struct dcb_output *dcbE, struct nvkm_output **poutp)
-{
-	return nvkm_output_new_(&g94_sor_output_func, disp,
-				index, dcbE, poutp);
-}
-
 /*******************************************************************************
  * DisplayPort
  ******************************************************************************/
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index f1e15a4..b4e3c50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -187,6 +187,7 @@ nv30_gr = {
 		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
 		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
 		{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
 		{}
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 300f5ed..e7ed04b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -123,6 +123,7 @@ nv34_gr = {
 		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
 		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
 		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
 		{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
 		{}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 740df0f..5e8abac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -124,6 +124,7 @@ nv35_gr = {
 		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
 		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
 		{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
 		{}
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 370dcd8..6eff637 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -84,7 +84,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 	start = 0x0100000000ULL;
 	limit = start + device->func->resource_size(device, 3);
 
-	ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
+	ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
 	if (ret)
 		return ret;
 
@@ -117,7 +117,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 	start = 0x0000000000ULL;
 	limit = start + device->func->resource_size(device, 1);
 
-	ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
+	ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
 	if (ret)
 		return ret;
 
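The two hunks above fix a size-versus-limit mixup: nvkm_vm_new() takes a length, while the old code passed the end address. Worked numbers (BAR3 size assumed purely for illustration):

	u64 start = 0x0100000000ULL;
	u64 size  = 0x0020000000ULL;	/* assumed resource_size(device, 3) */
	u64 limit = start + size;

	/* old: length = limit         = 0x0120000000 (too large) */
	/* new: length = limit - start = 0x0020000000 (correct)   */
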
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index dbcb0ef..be57220 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -31,6 +31,7 @@
 nvkm-y += nvkm/subdev/bios/therm.o
 nvkm-y += nvkm/subdev/bios/vmap.o
 nvkm-y += nvkm/subdev/bios/volt.o
+nvkm-y += nvkm/subdev/bios/vpstate.o
 nvkm-y += nvkm/subdev/bios/xpio.o
 nvkm-y += nvkm/subdev/bios/M0203.o
 nvkm-y += nvkm/subdev/bios/M0205.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 0843280..aafd5e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -23,6 +23,7 @@
  */
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
+#include <subdev/bios/extdev.h>
 #include <subdev/bios/iccsense.h>
 
 static u16
@@ -77,23 +78,47 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
 		return -ENOMEM;
 
 	for (i = 0; i < cnt; ++i) {
+		struct nvbios_extdev_func extdev;
 		struct pwr_rail_t *rail = &iccsense->rail[i];
+		u8 res_start = 0;
+		int r;
+
 		entry = table + hdr + i * len;
 
 		switch(ver) {
 		case 0x10:
 			rail->mode = nvbios_rd08(bios, entry + 0x1);
 			rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
-			rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3);
-			rail->rail = nvbios_rd08(bios, entry + 0x4);
+			res_start = 0x3;
 			break;
 		case 0x20:
 			rail->mode = nvbios_rd08(bios, entry);
 			rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
-			rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5);
-			rail->rail = nvbios_rd08(bios, entry + 0x6);
+			res_start = 0x5;
 			break;
 		};
+
+		if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
+			continue;
+
+		switch (extdev.type) {
+		case NVBIOS_EXTDEV_INA209:
+		case NVBIOS_EXTDEV_INA219:
+			rail->resistor_count = 1;
+			break;
+		case NVBIOS_EXTDEV_INA3221:
+			rail->resistor_count = 3;
+			break;
+		default:
+			rail->resistor_count = 0;
+			break;
+		}
+
+		for (r = 0; r < rail->resistor_count; ++r) {
+			rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
+			rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40);
+		}
+		rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2);
 	}
 
 	return 0;
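A self-contained sketch of the rail-entry layout the parser above walks: after mode/extdev_id, resistors start at res_start (0x3 for v0x10, 0x5 for v0x20), two bytes each (shunt value in mOhm, then a flag byte where bit 0x40 set means disabled), followed by a little-endian 16-bit config word. Struct and function names here are illustrative only.

	struct resistor { u8 mohm; bool enabled; };

	/* parse the resistor array of one rail entry from a raw byte buffer */
	static u16 parse_rail_resistors(const u8 *e, u8 res_start,
					struct resistor *res, int count)
	{
		int r;

		for (r = 0; r < count; r++) {
			res[r].mohm    = e[res_start + r * 2];
			res[r].enabled = !(e[res_start + r * 2 + 1] & 0x40);
		}
		return e[res_start + count * 2] |
		       (e[res_start + count * 2 + 1] << 8);
	}
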
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index 2f13db7..32bd8b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -61,7 +61,17 @@ nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!vmap * *ver) {
 	case 0x10:
+		info->max0 = 0xff;
+		info->max1 = 0xff;
+		info->max2 = 0xff;
+		break;
 	case 0x20:
+		info->max0 = nvbios_rd08(bios, vmap + 0x7);
+		info->max1 = nvbios_rd08(bios, vmap + 0x8);
+		if (*len >= 0xc)
+			info->max2 = nvbios_rd08(bios, vmap + 0xc);
+		else
+			info->max2 = 0xff;
 		break;
 	}
 	return vmap;
@@ -95,7 +105,7 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
 		info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
 		break;
 	case 0x20:
-		info->unk0   = nvbios_rd08(bios, vmap + 0x00);
+		info->mode   = nvbios_rd08(bios, vmap + 0x00);
 		info->link   = nvbios_rd08(bios, vmap + 0x01);
 		info->min    = nvbios_rd32(bios, vmap + 0x02);
 		info->max    = nvbios_rd32(bios, vmap + 0x06);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 6e0a336..4504822 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -75,20 +75,24 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 	case 0x12:
 		info->type    = NVBIOS_VOLT_GPIO;
 		info->vidmask = nvbios_rd08(bios, volt + 0x04);
+		info->ranged  = false;
 		break;
 	case 0x20:
 		info->type    = NVBIOS_VOLT_GPIO;
 		info->vidmask = nvbios_rd08(bios, volt + 0x05);
+		info->ranged  = false;
 		break;
 	case 0x30:
 		info->type    = NVBIOS_VOLT_GPIO;
 		info->vidmask = nvbios_rd08(bios, volt + 0x04);
+		info->ranged  = false;
 		break;
 	case 0x40:
 		info->type    = NVBIOS_VOLT_GPIO;
 		info->base    = nvbios_rd32(bios, volt + 0x04);
 		info->step    = nvbios_rd16(bios, volt + 0x08);
 		info->vidmask = nvbios_rd08(bios, volt + 0x0b);
+		info->ranged  = true; /* XXX: find the flag byte */
 		/*XXX*/
 		info->min     = 0;
 		info->max     = info->base;
@@ -104,9 +108,11 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 			info->pwm_freq  = nvbios_rd32(bios, volt + 0x5) / 1000;
 			info->pwm_range = nvbios_rd32(bios, volt + 0x16);
 		} else {
-			info->type      = NVBIOS_VOLT_GPIO;
-			info->vidmask   = nvbios_rd08(bios, volt + 0x06);
-			info->step      = nvbios_rd16(bios, volt + 0x16);
+			info->type    = NVBIOS_VOLT_GPIO;
+			info->vidmask = nvbios_rd08(bios, volt + 0x06);
+			info->step    = nvbios_rd16(bios, volt + 0x16);
+			info->ranged  =
+				!!(nvbios_rd08(bios, volt + 0x4) & 0x2);
 		}
 		break;
 	}
@@ -142,7 +148,10 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
 		info->vid     = nvbios_rd08(bios, volt + 0x01) >> 2;
 		break;
 	case 0x40:
+		break;
 	case 0x50:
+		info->voltage = nvbios_rd32(bios, volt) & 0x001fffff;
+		info->vid     = (nvbios_rd32(bios, volt) >> 23) & 0xff;
 		break;
 	}
 	return volt;
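The new ranged flag appears to distinguish tables that list explicit VID/voltage entries from tables that only give a base/step range; under that reading a ranged table defines voltages implicitly. A non-authoritative sketch of the assumed semantics:

	/* ranged table: voltage(vid) = base + vid * step, clamped to [min, max] */
	static s64 ranged_volt_uv(u32 base_uv, s16 step_uv, u8 vid,
				  u32 min_uv, u32 max_uv)
	{
		s64 uv = (s64)base_uv + (s64)step_uv * vid;

		return (uv < min_uv || uv > max_uv) ? -EINVAL : uv;
	}
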
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
new file mode 100644
index 0000000..f199270
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vpstate.h>
+
+static u32
+nvbios_vpstate_offset(struct nvkm_bios *b)
+{
+	struct bit_entry bit_P;
+
+	if (!bit_entry(b, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			return nvbios_rd32(b, bit_P.offset + 0x38);
+	}
+
+	return 0x0000;
+}
+
+int
+nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
+{
+	if (!h)
+		return -EINVAL;
+
+	h->offset = nvbios_vpstate_offset(b);
+	if (!h->offset)
+		return -ENODEV;
+
+	h->version = nvbios_rd08(b, h->offset);
+	switch (h->version) {
+	case 0x10:
+		h->hlen     = nvbios_rd08(b, h->offset + 0x1);
+		h->elen     = nvbios_rd08(b, h->offset + 0x2);
+		h->slen     = nvbios_rd08(b, h->offset + 0x3);
+		h->scount   = nvbios_rd08(b, h->offset + 0x4);
+		h->ecount   = nvbios_rd08(b, h->offset + 0x5);
+
+		h->base_id  = nvbios_rd08(b, h->offset + 0x0f);
+		h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+		h->tdp_id   = nvbios_rd08(b, h->offset + 0x11);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+int
+nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h,
+		     u8 idx, struct nvbios_vpstate_entry *e)
+{
+	u32 offset;
+
+	if (!e || !h || idx > h->ecount)
+		return -EINVAL;
+
+	offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount));
+	e->pstate    = nvbios_rd08(b, offset);
+	e->clock_mhz = nvbios_rd16(b, offset + 0x5);
+	return 0;
+}
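The entry lookup above addresses a fixed header followed by ecount entries, each elen bytes plus scount sub-entries of slen bytes; only the pstate byte and the 16-bit clock at +0x5 are consumed so far. The addressing, restated as a sketch:

	/* offset of entry idx inside the vpstate table */
	static u32 vpstate_entry_offset(const struct nvbios_vpstate_header *h,
					u8 idx)
	{
		return h->offset + h->hlen +
		       idx * (h->elen + h->slen * h->scount);
	}
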
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 7102c25..fa1c121 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -27,6 +27,7 @@
 #include <subdev/bios/boost.h>
 #include <subdev/bios/cstep.h>
 #include <subdev/bios/perf.h>
+#include <subdev/bios/vpstate.h>
 #include <subdev/fb.h>
 #include <subdev/therm.h>
 #include <subdev/volt.h>
@@ -74,6 +75,88 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
 /******************************************************************************
  * C-States
  *****************************************************************************/
+static bool
+nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
+		  u32 max_volt, int temp)
+{
+	const struct nvkm_domain *domain = clk->domains;
+	struct nvkm_volt *volt = clk->subdev.device->volt;
+	int voltage;
+
+	while (domain && domain->name != nv_clk_src_max) {
+		if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
+			u32 freq = cstate->domain[domain->name];
+			switch (clk->boost_mode) {
+			case NVKM_CLK_BOOST_NONE:
+				if (clk->base_khz && freq > clk->base_khz)
+					return false;
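+				/* fall through: also enforce the boost cap */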
+			case NVKM_CLK_BOOST_BIOS:
+				if (clk->boost_khz && freq > clk->boost_khz)
+					return false;
+			}
+		}
+		domain++;
+	}
+
+	if (!volt)
+		return true;
+
+	voltage = nvkm_volt_map(volt, cstate->voltage, temp);
+	if (voltage < 0)
+		return false;
+	return voltage <= min(max_volt, volt->max_uv);
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
+		      struct nvkm_cstate *start)
+{
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_volt *volt = device->volt;
+	struct nvkm_cstate *cstate;
+	int max_volt;
+
+	if (!pstate || !start)
+		return NULL;
+
+	if (!volt)
+		return start;
+
+	max_volt = volt->max_uv;
+	if (volt->max0_id != 0xff)
+		max_volt = min(max_volt,
+			       nvkm_volt_map(volt, volt->max0_id, clk->temp));
+	if (volt->max1_id != 0xff)
+		max_volt = min(max_volt,
+			       nvkm_volt_map(volt, volt->max1_id, clk->temp));
+	if (volt->max2_id != 0xff)
+		max_volt = min(max_volt,
+			       nvkm_volt_map(volt, volt->max2_id, clk->temp));
+
+	for (cstate = start; &cstate->head != &pstate->list;
+	     cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
+		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
+			break;
+	}
+
+	return cstate;
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
+{
+	struct nvkm_cstate *cstate;
+	if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
+		return list_last_entry(&pstate->list, typeof(*cstate), head);
+	else {
+		list_for_each_entry(cstate, &pstate->list, head) {
+			if (cstate->id == cstatei)
+				return cstate;
+		}
+	}
+	return NULL;
+}
+
 static int
 nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 {
@@ -85,7 +168,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 	int ret;
 
 	if (!list_empty(&pstate->list)) {
-		cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+		cstate = nvkm_cstate_get(clk, pstate, cstatei);
+		cstate = nvkm_cstate_find_best(clk, pstate, cstate);
 	} else {
 		cstate = &pstate->base;
 	}
@@ -99,7 +183,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 	}
 
 	if (volt) {
-		ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
+		ret = nvkm_volt_set_id(volt, cstate->voltage,
+				       pstate->base.voltage, clk->temp, +1);
 		if (ret && ret != -ENODEV) {
 			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
 			return ret;
@@ -113,7 +198,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 	}
 
 	if (volt) {
-		ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
+		ret = nvkm_volt_set_id(volt, cstate->voltage,
+				       pstate->base.voltage, clk->temp, -1);
 		if (ret && ret != -ENODEV)
 			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
 	}
@@ -138,6 +224,7 @@ static int
 nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
 {
 	struct nvkm_bios *bios = clk->subdev.device->bios;
+	struct nvkm_volt *volt = clk->subdev.device->volt;
 	const struct nvkm_domain *domain = clk->domains;
 	struct nvkm_cstate *cstate = NULL;
 	struct nvbios_cstepX cstepX;
@@ -148,12 +235,16 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
 	if (!data)
 		return -ENOENT;
 
+	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
+		return -EINVAL;
+
 	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
 	if (!cstate)
 		return -ENOMEM;
 
 	*cstate = pstate->base;
 	cstate->voltage = cstepX.voltage;
+	cstate->id = idx;
 
 	while (domain && domain->name != nv_clk_src_max) {
 		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
@@ -175,7 +266,7 @@ static int
 nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
 {
 	struct nvkm_subdev *subdev = &clk->subdev;
-	struct nvkm_ram *ram = subdev->device->fb->ram;
+	struct nvkm_fb *fb = subdev->device->fb;
 	struct nvkm_pci *pci = subdev->device->pci;
 	struct nvkm_pstate *pstate;
 	int ret, idx = 0;
@@ -190,7 +281,8 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
 
 	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
 
-	if (ram && ram->func->calc) {
+	if (fb && fb->ram && fb->ram->func->calc) {
+		struct nvkm_ram *ram = fb->ram;
 		int khz = pstate->base.domain[nv_clk_src_mem];
 		do {
 			ret = ram->func->calc(ram, khz);
@@ -200,7 +292,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
 		ram->func->tidy(ram);
 	}
 
-	return nvkm_cstate_prog(clk, pstate, 0);
+	return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
 }
 
 static void
@@ -214,14 +306,14 @@ nvkm_pstate_work(struct work_struct *work)
 		return;
 	clk->pwrsrc = power_supply_is_system_supplied();
 
-	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
 		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
-		   clk->astate, clk->tstate, clk->dstate);
+		   clk->astate, clk->temp, clk->dstate);
 
 	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
 	if (clk->state_nr && pstate != -1) {
 		pstate = (pstate < 0) ? clk->astate : pstate;
-		pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
+		pstate = min(pstate, clk->state_nr - 1);
 		pstate = max(pstate, clk->dstate);
 	} else {
 		pstate = clk->pstate = -1;
@@ -448,13 +540,12 @@ nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
 }
 
 int
-nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
+nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
 {
-	if (!rel) clk->tstate  = req;
-	if ( rel) clk->tstate += rel;
-	clk->tstate = min(clk->tstate, 0);
-	clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
-	return nvkm_pstate_calc(clk, true);
+	if (clk->temp == temp)
+		return 0;
+	clk->temp = temp;
+	return nvkm_pstate_calc(clk, false);
 }
 
 int
@@ -524,9 +615,9 @@ nvkm_clk_init(struct nvkm_subdev *subdev)
 		return clk->func->init(clk);
 
 	clk->astate = clk->state_nr - 1;
-	clk->tstate = 0;
 	clk->dstate = 0;
 	clk->pstate = -1;
+	clk->temp = 90; /* reasonable default value */
 	nvkm_pstate_calc(clk, true);
 	return 0;
 }
@@ -561,10 +652,22 @@ int
 nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
 	      int index, bool allow_reclock, struct nvkm_clk *clk)
 {
+	struct nvkm_subdev *subdev = &clk->subdev;
+	struct nvkm_bios *bios = device->bios;
 	int ret, idx, arglen;
 	const char *mode;
+	struct nvbios_vpstate_header h;
 
-	nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
+	nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+
+	if (bios && !nvbios_vpstate_parse(bios, &h)) {
+		struct nvbios_vpstate_entry base, boost;
+		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
+			clk->boost_khz = boost.clock_mhz * 1000;
+		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
+			clk->base_khz = base.clock_mhz * 1000;
+	}
+
 	clk->func = func;
 	INIT_LIST_HEAD(&clk->states);
 	clk->domains = func->domains;
@@ -607,6 +710,8 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
 	if (mode)
 		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
 
+	clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
+				       NVKM_CLK_BOOST_NONE);
 	return 0;
 }
 
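Net effect of the clk changes above: programming now picks the highest c-state whose clocks pass the NvBoost cap and whose voltage fits the temperature-dependent max0/max1/max2 limits. A condensed, non-authoritative sketch of the clock check (NVKM_CLK_BOOST_NONE/BIOS/FULL assumed to be 0/1/2):

	/* NvBoost=0 caps at base clock, =1 at boost clock, =2 uncapped */
	static bool freq_allowed(u32 freq, int boost_mode,
				 u32 base_khz, u32 boost_khz)
	{
		switch (boost_mode) {
		case 0:	/* NVKM_CLK_BOOST_NONE */
			if (base_khz && freq > base_khz)
				return false;
			/* fall through */
		case 1:	/* NVKM_CLK_BOOST_BIOS */
			if (boost_khz && freq > boost_khz)
				return false;
		}
		return true;
	}
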
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 89d5543..7f67f9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -457,7 +457,7 @@ gf100_clk = {
 		{ nv_clk_src_hubk06 , 0x00 },
 		{ nv_clk_src_hubk01 , 0x01 },
 		{ nv_clk_src_copy   , 0x02 },
-		{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
+		{ nv_clk_src_gpc    , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
 		{ nv_clk_src_rop    , 0x04 },
 		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
 		{ nv_clk_src_vdec   , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 06bc0d2..0b37e3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -491,7 +491,7 @@ gk104_clk = {
 	.domains = {
 		{ nv_clk_src_crystal, 0xff },
 		{ nv_clk_src_href   , 0xff },
-		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
 		{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
 		{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
 		{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc..3841ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base)
 }
 
 int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = fb->subdev.device;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 	int ret, size = 0x1000;
 
 	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
 	size = min(size, 0x1000);
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
-			      false, &fb->mmu_rd);
+			      false, &fb->base.mmu_rd);
 	if (ret)
 		return ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
-			      false, &fb->mmu_wr);
+			      false, &fb->base.mmu_wr);
 	if (ret)
 		return ret;
 
+	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c10_page) {
+		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c10))
+			return -EFAULT;
+	}
+
 	return 0;
 }
 
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	nvkm_fb_ctor(func, device, index, &fb->base);
 	*pfb = &fb->base;
 
-	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c10_page) {
-		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c10))
-			return -EFAULT;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 1b5fb02..0595e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base)
 	nvkm_fifo_chan_put(fifo, flags, &chan);
 }
 
+static int
+nv50_fb_oneinit(struct nvkm_fb *base)
+{
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
+
+	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c08_page) {
+		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c08))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
 static void
 nv50_fb_init(struct nvkm_fb *base)
 {
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
 static const struct nvkm_fb_func
 nv50_fb_ = {
 	.dtor = nv50_fb_dtor,
+	.oneinit = nv50_fb_oneinit,
 	.init = nv50_fb_init,
 	.intr = nv50_fb_intr,
 	.ram_new = nv50_fb_ram_new,
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
 	fb->func = func;
 	*pfb = &fb->base;
 
-	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c08_page) {
-		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c08))
-			return -EFAULT;
-	} else {
-		nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b9ec0ae..b60068b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -24,6 +24,7 @@ int  gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
 int  gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
 void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
 
+int  gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
 int  gk104_ram_init(struct nvkm_ram *ram);
 
 /* RAM type-specific MR calculation routines */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1fa3ade..7904fa4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -259,7 +259,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
 
 	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
 	ram_block(fuc);
-	ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+	if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+		ram_wr32(fuc, 0x62c000, 0x0f0f0000);
 
 	/* MR1: turn termination on early, for some reason.. */
 	if ((ram->base.mr[1] & 0x03c) != 0x030) {
@@ -658,7 +660,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
 		gk104_ram_train(fuc, 0x80020000, 0x01000000);
 
 	ram_unblock(fuc);
-	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+	if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+		ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
 
 	if (next->bios.rammap_11_08_01)
 		data = 0x00000800;
@@ -706,7 +710,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
 
 	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
 	ram_block(fuc);
-	ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+	if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+		ram_wr32(fuc, 0x62c000, 0x0f0f0000);
 
 	if (vc == 1 && ram_have(fuc, gpio2E)) {
 		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -936,7 +942,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
 	ram_nsec(fuc, 1000);
 
 	ram_unblock(fuc);
-	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+	if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+		ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
 
 	if (next->bios.rammap_11_08_01)
 		data = 0x00000800;
@@ -1530,6 +1538,12 @@ gk104_ram_func = {
 int
 gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
+	return gk104_ram_ctor(fb, pram, 0x022554);
+}
+
+int
+gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+{
 	struct nvkm_subdev *subdev = &fb->subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_bios *bios = device->bios;
@@ -1544,7 +1558,7 @@ gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 		return -ENOMEM;
 	*pram = &ram->base;
 
-	ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
+	ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 43d807f..ac862d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -23,18 +23,8 @@
  */
 #include "ram.h"
 
-static const struct nvkm_ram_func
-gm107_ram_func = {
-	.init = gk104_ram_init,
-	.get = gf100_ram_get,
-	.put = gf100_ram_put,
-};
-
 int
 gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
-		return -ENOMEM;
-
-	return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
+	return gk104_ram_ctor(fb, pram, 0x021c14);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index b7159b3..1a4ab82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -29,7 +29,7 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
 	nvkm_mask(device, 0x137250, 0x3f, 0);
 
 	nvkm_mask(device, 0x000200, 0x20, 0);
-	usleep_range(20, 30);
+	udelay(20);
 	nvkm_mask(device, 0x000200, 0x20, 0x20);
 
 	nvkm_wr32(device, 0x12004c, 0x4);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 41bd5d0..658355f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -96,60 +96,12 @@ nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
 }
 
 static void
-nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense,
-			    struct nvkm_iccsense_sensor *sensor)
-{
-	struct nvkm_subdev *subdev = &iccsense->subdev;
-	/* configuration:
-	 * 0x0007: 0x0007 shunt and bus continous
-	 * 0x0078: 0x0078 128 samples shunt
-	 * 0x0780: 0x0780 128 samples bus
-	 * 0x1800: 0x0000 +-40 mV shunt range
-	 * 0x2000: 0x0000 16V FSR
-         */
-	u16 value = 0x07ff;
-	nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
-	nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
-nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense,
-			     struct nvkm_iccsense_sensor *sensor)
-{
-	struct nvkm_subdev *subdev = &iccsense->subdev;
-	/* configuration:
-	 * 0x0007: 0x0007 shunt and bus continous
-	 * 0x0031: 0x0000 140 us conversion time shunt
-	 * 0x01c0: 0x0000 140 us conversion time bus
-	 * 0x0f00: 0x0f00 1024 samples
-	 * 0x7000: 0x?000 channels
-         */
-	u16 value = 0x0e07;
-	if (sensor->rail_mask & 0x1)
-		value |= 0x1 << 14;
-	if (sensor->rail_mask & 0x2)
-		value |= 0x1 << 13;
-	if (sensor->rail_mask & 0x4)
-		value |= 0x1 << 12;
-	nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
-	nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
 nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
 		            struct nvkm_iccsense_sensor *sensor)
 {
-	switch (sensor->type) {
-	case NVBIOS_EXTDEV_INA209:
-	case NVBIOS_EXTDEV_INA219:
-		nvkm_iccsense_ina209_config(iccsense, sensor);
-		break;
-	case NVBIOS_EXTDEV_INA3221:
-		nvkm_iccsense_ina3221_config(iccsense, sensor);
-		break;
-	default:
-		break;
-	}
+	struct nvkm_subdev *subdev = &iccsense->subdev;
+	nvkm_trace(subdev, "write config of extdev %i: 0x%04x\n", sensor->id, sensor->config);
+	nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, sensor->config);
 }
 
 int
@@ -196,7 +148,6 @@ nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
 static struct nvkm_iccsense_sensor*
 nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
 {
-
 	struct nvkm_subdev *subdev = &iccsense->subdev;
 	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvkm_i2c *i2c = subdev->device->i2c;
@@ -245,7 +196,7 @@ nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
 	sensor->type = extdev.type;
 	sensor->i2c = &i2c_bus->i2c;
 	sensor->addr = addr;
-	sensor->rail_mask = 0x0;
+	sensor->config = 0x0;
 	return sensor;
 }
 
@@ -273,48 +224,56 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
 
 	iccsense->data_valid = true;
 	for (i = 0; i < stbl.nr_entry; ++i) {
-		struct pwr_rail_t *r = &stbl.rail[i];
-		struct nvkm_iccsense_rail *rail;
+		struct pwr_rail_t *pwr_rail = &stbl.rail[i];
 		struct nvkm_iccsense_sensor *sensor;
-		int (*read)(struct nvkm_iccsense *,
-			    struct nvkm_iccsense_rail *);
+		int r;
 
-		if (!r->mode || r->resistor_mohm == 0)
+		if (pwr_rail->mode != 1 || !pwr_rail->resistor_count)
 			continue;
 
-		sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id);
+		sensor = nvkm_iccsense_get_sensor(iccsense, pwr_rail->extdev_id);
 		if (!sensor)
 			continue;
 
-		switch (sensor->type) {
-		case NVBIOS_EXTDEV_INA209:
-			if (r->rail != 0)
-				continue;
-			read = nvkm_iccsense_ina209_read;
-			break;
-		case NVBIOS_EXTDEV_INA219:
-			if (r->rail != 0)
-				continue;
-			read = nvkm_iccsense_ina219_read;
-			break;
-		case NVBIOS_EXTDEV_INA3221:
-			if (r->rail >= 3)
-				continue;
-			read = nvkm_iccsense_ina3221_read;
-			break;
-		default:
-			continue;
-		}
+		if (!sensor->config)
+			sensor->config = pwr_rail->config;
+		else if (sensor->config != pwr_rail->config)
+			nvkm_error(subdev, "config mismatch found for extdev %i\n", pwr_rail->extdev_id);
 
-		rail = kmalloc(sizeof(*rail), GFP_KERNEL);
-		if (!rail)
-			return -ENOMEM;
-		sensor->rail_mask |= 1 << r->rail;
-		rail->read = read;
-		rail->sensor = sensor;
-		rail->idx = r->rail;
-		rail->mohm = r->resistor_mohm;
-		list_add_tail(&rail->head, &iccsense->rails);
+		for (r = 0; r < pwr_rail->resistor_count; ++r) {
+			struct nvkm_iccsense_rail *rail;
+			struct pwr_rail_resistor_t *res = &pwr_rail->resistors[r];
+			int (*read)(struct nvkm_iccsense *,
+				    struct nvkm_iccsense_rail *);
+
+			if (!res->mohm || !res->enabled)
+				continue;
+
+			switch (sensor->type) {
+			case NVBIOS_EXTDEV_INA209:
+				read = nvkm_iccsense_ina209_read;
+				break;
+			case NVBIOS_EXTDEV_INA219:
+				read = nvkm_iccsense_ina219_read;
+				break;
+			case NVBIOS_EXTDEV_INA3221:
+				read = nvkm_iccsense_ina3221_read;
+				break;
+			default:
+				continue;
+			}
+
+			rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+			if (!rail)
+				return -ENOMEM;
+
+			rail->read = read;
+			rail->sensor = sensor;
+			rail->idx = r;
+			rail->mohm = res->mohm;
+			nvkm_debug(subdev, "create rail for extdev %i: { idx: %i, mohm: %i }\n", pwr_rail->extdev_id, r, rail->mohm);
+			list_add_tail(&rail->head, &iccsense->rails);
+		}
 	}
 	return 0;
 }
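
The iccsense rework above drops the per-sensor rail mask in favour of the resistor list carried by each power-rail table entry, so a single sensor can expose several shunt resistors and every enabled one becomes its own nvkm_iccsense_rail. The resistor value matters because the sensed shunt voltage is turned into a current by Ohm's law; a standalone sketch of that conversion (hypothetical helper, using the same microvolt/milliohm units as the driver):

	static int example_shunt_current_ua(int shunt_uv, int resistor_mohm)
	{
		/* I = V / R: microvolts across milliohms gives milliamps;
		 * scale by 1000 for microamps. */
		return shunt_uv * 1000 / resistor_mohm;
	}
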
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index b72c31d..e90e0f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -10,7 +10,7 @@ struct nvkm_iccsense_sensor {
 	enum nvbios_extdev_type type;
 	struct i2c_adapter *i2c;
 	u8 addr;
-	u8 rail_mask;
+	u16 config;
 };
 
 struct nvkm_iccsense_rail {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index 45a2f8e..9abfa5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -23,8 +23,8 @@
  */
 #include "mxms.h"
 
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
 
 static u8 *
 mxms_data(struct nvkm_mxm *mxm)
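
The ROM16()/ROM32() change above is a portability fix: dereferencing a u16 or u32 pointer into byte-packed ROM data is only safe on architectures that tolerate unaligned loads, while the get_unaligned_le*() accessors compile down to byte loads where the hardware requires it. A minimal sketch:

	#include <asm/unaligned.h>
	#include <linux/types.h>

	static u16 example_rom16(const u8 *rom, unsigned int offset)
	{
		/* safe at any byte offset, on any architecture */
		return get_unaligned_le16(rom + offset);
	}
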
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index c340762..bcd179b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -1,6 +1,7 @@
 nvkm-y += nvkm/subdev/volt/base.o
 nvkm-y += nvkm/subdev/volt/gpio.o
 nvkm-y += nvkm/subdev/volt/nv40.o
+nvkm-y += nvkm/subdev/volt/gf100.o
 nvkm-y += nvkm/subdev/volt/gk104.o
 nvkm-y += nvkm/subdev/volt/gk20a.o
 nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 1c3d23b..e8569b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -26,6 +26,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/vmap.h>
 #include <subdev/bios/volt.h>
+#include <subdev/therm.h>
 
 int
 nvkm_volt_get(struct nvkm_volt *volt)
@@ -50,23 +51,35 @@ static int
 nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
 {
 	struct nvkm_subdev *subdev = &volt->subdev;
-	int i, ret = -EINVAL;
+	int i, ret = -EINVAL, best_err = volt->max_uv, best = -1;
 
 	if (volt->func->volt_set)
 		return volt->func->volt_set(volt, uv);
 
 	for (i = 0; i < volt->vid_nr; i++) {
-		if (volt->vid[i].uv == uv) {
-			ret = volt->func->vid_set(volt, volt->vid[i].vid);
-			nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+		int err = volt->vid[i].uv - uv;
+		if (err < 0 || err > best_err)
+			continue;
+
+		best_err = err;
+		best = i;
+		if (best_err == 0)
 			break;
-		}
 	}
+
+	if (best == -1) {
+		nvkm_error(subdev, "couldn't set %iuv\n", uv);
+		return ret;
+	}
+
+	ret = volt->func->vid_set(volt, volt->vid[best].vid);
+	nvkm_debug(subdev, "set req %duv to %duv: %d\n", uv,
+		   volt->vid[best].uv, ret);
 	return ret;
 }
 
-static int
-nvkm_volt_map(struct nvkm_volt *volt, u8 id)
+int
+nvkm_volt_map_min(struct nvkm_volt *volt, u8 id)
 {
 	struct nvkm_bios *bios = volt->subdev.device->bios;
 	struct nvbios_vmap_entry info;
@@ -76,7 +89,7 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
 	vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
 	if (vmap) {
 		if (info.link != 0xff) {
-			int ret = nvkm_volt_map(volt, info.link);
+			int ret = nvkm_volt_map_min(volt, info.link);
 			if (ret < 0)
 				return ret;
 			info.min += ret;
@@ -88,19 +101,79 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
 }
 
 int
-nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp)
+{
+	struct nvkm_bios *bios = volt->subdev.device->bios;
+	struct nvbios_vmap_entry info;
+	u8  ver, len;
+	u16 vmap;
+
+	vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+	if (vmap) {
+		s64 result;
+
+		if (volt->speedo < 0)
+			return volt->speedo;
+
+		if (ver == 0x10 || (ver == 0x20 && info.mode == 0)) {
+			result  = div64_s64((s64)info.arg[0], 10);
+			result += div64_s64((s64)info.arg[1] * volt->speedo, 10);
+			result += div64_s64((s64)info.arg[2] * volt->speedo * volt->speedo, 100000);
+		} else if (ver == 0x20) {
+			switch (info.mode) {
+			/* 0x0 handled above! */
+			case 0x1:
+				result =  ((s64)info.arg[0] * 15625) >> 18;
+				result += ((s64)info.arg[1] * volt->speedo * 15625) >> 18;
+				result += ((s64)info.arg[2] * temp * 15625) >> 10;
+				result += ((s64)info.arg[3] * volt->speedo * temp * 15625) >> 18;
+				result += ((s64)info.arg[4] * volt->speedo * volt->speedo * 15625) >> 30;
+				result += ((s64)info.arg[5] * temp * temp * 15625) >> 18;
+				break;
+			case 0x3:
+				result = (info.min + info.max) / 2;
+				break;
+			case 0x2:
+			default:
+				result = info.min;
+				break;
+			}
+		} else {
+			return -ENODEV;
+		}
+
+		result = min(max(result, (s64)info.min), (s64)info.max);
+
+		if (info.link != 0xff) {
+			int ret = nvkm_volt_map(volt, info.link, temp);
+			if (ret < 0)
+				return ret;
+			result += ret;
+		}
+		return result;
+	}
+
+	return id ? id * 10000 : -ENODEV;
+}
+
+int
+nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, u8 min_id, u8 temp,
+		 int condition)
 {
 	int ret;
 
 	if (volt->func->set_id)
 		return volt->func->set_id(volt, id, condition);
 
-	ret = nvkm_volt_map(volt, id);
+	ret = nvkm_volt_map(volt, id, temp);
 	if (ret >= 0) {
 		int prev = nvkm_volt_get(volt);
 		if (!condition || prev < 0 ||
 		    (condition < 0 && ret < prev) ||
 		    (condition > 0 && ret > prev)) {
+			int min = nvkm_volt_map(volt, min_id, temp);
+			if (min >= 0)
+				ret = max(min, ret);
 			ret = nvkm_volt_set(volt, ret);
 		} else {
 			ret = 0;
@@ -112,6 +185,7 @@ nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
 static void
 nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_volt_entry ivid;
 	struct nvbios_volt info;
 	u8  ver, hdr, cnt, len;
@@ -119,7 +193,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
 	int i;
 
 	data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
-	if (data && info.vidmask && info.base && info.step) {
+	if (data && info.vidmask && info.base && info.step && info.ranged) {
+		nvkm_debug(subdev, "found ranged based VIDs\n");
 		volt->min_uv = info.min;
 		volt->max_uv = info.max;
 		for (i = 0; i < info.vidmask + 1; i++) {
@@ -132,7 +207,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
 			info.base += info.step;
 		}
 		volt->vid_mask = info.vidmask;
-	} else if (data && info.vidmask) {
+	} else if (data && info.vidmask && !info.ranged) {
+		nvkm_debug(subdev, "found entry based VIDs\n");
 		volt->min_uv = 0xffffffff;
 		volt->max_uv = 0;
 		for (i = 0; i < cnt; i++) {
@@ -154,6 +230,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
 }
 
 static int
+nvkm_volt_speedo_read(struct nvkm_volt *volt)
+{
+	if (volt->func->speedo_read)
+		return volt->func->speedo_read(volt);
+	return -EINVAL;
+}
+
+static int
 nvkm_volt_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_volt *volt = nvkm_volt(subdev);
@@ -167,6 +251,21 @@ nvkm_volt_init(struct nvkm_subdev *subdev)
 	return 0;
 }
 
+static int
+nvkm_volt_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_volt *volt = nvkm_volt(subdev);
+
+	volt->speedo = nvkm_volt_speedo_read(volt);
+	if (volt->speedo > 0)
+		nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo);
+
+	if (volt->func->oneinit)
+		return volt->func->oneinit(volt);
+
+	return 0;
+}
+
 static void *
 nvkm_volt_dtor(struct nvkm_subdev *subdev)
 {
@@ -177,6 +276,7 @@ static const struct nvkm_subdev_func
 nvkm_volt = {
 	.dtor = nvkm_volt_dtor,
 	.init = nvkm_volt_init,
+	.oneinit = nvkm_volt_oneinit,
 };
 
 void
@@ -191,9 +291,22 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
 
 	/* Assuming the non-bios device should build the voltage table later */
 	if (bios) {
+		u8 ver, hdr, cnt, len;
+		struct nvbios_vmap vmap;
+
 		nvkm_volt_parse_bios(bios, volt);
 		nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
 			   volt->min_uv, volt->max_uv);
+
+		if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {
+			volt->max0_id = vmap.max0;
+			volt->max1_id = vmap.max1;
+			volt->max2_id = vmap.max2;
+		} else {
+			volt->max0_id = 0xff;
+			volt->max1_id = 0xff;
+			volt->max2_id = 0xff;
+		}
 	}
 
 	if (volt->vid_nr) {
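
Two behavioural changes are folded into volt/base.c above: nvkm_volt_set() no longer demands an exact table match but picks the lowest VID entry at or above the requested voltage, and nvkm_volt_map() can now evaluate the speedo- and temperature-based voltage-map entries. A standalone model of the new selection loop (simplified from the code above, with max_uv as the initial error bound just like the driver):

	static int example_pick_vid(const int *uv, int nr, int req_uv, int max_uv)
	{
		int i, best = -1, best_err = max_uv;

		for (i = 0; i < nr; i++) {
			int err = uv[i] - req_uv;

			if (err < 0 || err > best_err)
				continue;	/* below request, or worse overshoot */

			best_err = err;
			best = i;
			if (!err)
				break;		/* exact match, stop early */
		}
		return best;	/* -1: no entry at or above req_uv */
	}
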
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
new file mode 100644
index 0000000..d9ed692
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+#include <subdev/fuse.h>
+
+static int
+gf100_volt_speedo_read(struct nvkm_volt *volt)
+{
+	struct nvkm_device *device = volt->subdev.device;
+	struct nvkm_fuse *fuse = device->fuse;
+
+	if (!fuse)
+		return -EINVAL;
+
+	return nvkm_fuse_read(fuse, 0x1cc);
+}
+
+int
+gf100_volt_oneinit(struct nvkm_volt *volt)
+{
+	struct nvkm_subdev *subdev = &volt->subdev;
+	if (volt->speedo <= 0)
+		nvkm_error(subdev, "couldn't find speedo value, volting not "
+			   "possible\n");
+	return 0;
+}
+
+static const struct nvkm_volt_func
+gf100_volt = {
+	.oneinit = gf100_volt_oneinit,
+	.vid_get = nvkm_voltgpio_get,
+	.vid_set = nvkm_voltgpio_set,
+	.speedo_read = gf100_volt_speedo_read,
+};
+
+int
+gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+	struct nvkm_volt *volt;
+	int ret;
+
+	ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+	*pvolt = volt;
+	if (ret)
+		return ret;
+
+	return nvkm_voltgpio_init(volt);
+}
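
gf100.c above reads the chip's "speedo" value from fuse register 0x1cc; nvkm_volt_map() then feeds it into the voltage-map polynomials. For version-0x10 entries the evaluation reduces to a quadratic in speedo with scaled integer coefficients, roughly as follows (standalone sketch mirroring the div64_s64() arithmetic in base.c; the result is then clamped to the entry's min/max microvolts):

	#include <linux/types.h>

	static s64 example_vmap_v10(s64 arg0, s64 arg1, s64 arg2, s64 speedo)
	{
		return arg0 / 10			/* constant term */
		     + arg1 * speedo / 10		/* linear term */
		     + arg2 * speedo * speedo / 100000;	/* quadratic term */
	}
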
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 420bd84d..b2c5d11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -27,6 +27,7 @@
 #include <subdev/gpio.h>
 #include <subdev/bios.h>
 #include <subdev/bios/volt.h>
+#include <subdev/fuse.h>
 
 #define gk104_volt(p) container_of((p), struct gk104_volt, base)
 struct gk104_volt {
@@ -64,13 +65,33 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
 	return 0;
 }
 
+static int
+gk104_volt_speedo_read(struct nvkm_volt *volt)
+{
+	struct nvkm_device *device = volt->subdev.device;
+	struct nvkm_fuse *fuse = device->fuse;
+	int ret;
+
+	if (!fuse)
+		return -EINVAL;
+
+	nvkm_wr32(device, 0x122634, 0x0);
+	ret = nvkm_fuse_read(fuse, 0x3a8);
+	nvkm_wr32(device, 0x122634, 0x41);
+	return ret;
+}
+
 static const struct nvkm_volt_func
 gk104_volt_pwm = {
+	.oneinit = gf100_volt_oneinit,
 	.volt_get = gk104_volt_get,
 	.volt_set = gk104_volt_set,
+	.speedo_read = gk104_volt_speedo_read,
 }, gk104_volt_gpio = {
+	.oneinit = gf100_volt_oneinit,
 	.vid_get = nvkm_voltgpio_get,
 	.vid_set = nvkm_voltgpio_set,
+	.speedo_read = gk104_volt_speedo_read,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index d5140d9..354bafe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -9,11 +9,13 @@ int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
 		   int index, struct nvkm_volt **);
 
 struct nvkm_volt_func {
+	int (*oneinit)(struct nvkm_volt *);
 	int (*volt_get)(struct nvkm_volt *);
 	int (*volt_set)(struct nvkm_volt *, u32 uv);
 	int (*vid_get)(struct nvkm_volt *);
 	int (*vid_set)(struct nvkm_volt *, u8 vid);
 	int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+	int (*speedo_read)(struct nvkm_volt *);
 };
 
 int nvkm_voltgpio_init(struct nvkm_volt *);
@@ -23,4 +25,6 @@ int nvkm_voltgpio_set(struct nvkm_volt *, u8);
 int nvkm_voltpwm_init(struct nvkm_volt *volt);
 int nvkm_voltpwm_get(struct nvkm_volt *volt);
 int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv);
+
+int gf100_volt_oneinit(struct nvkm_volt *);
 #endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 180f644..16c691db 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -438,13 +438,14 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
 	}
 }
 
-static bool omap_crtc_is_plane_prop(struct drm_device *dev,
+static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
 	struct drm_property *property)
 {
+	struct drm_device *dev = crtc->dev;
 	struct omap_drm_private *priv = dev->dev_private;
 
 	return property == priv->zorder_prop ||
-		property == dev->mode_config.rotation_property;
+		property == crtc->primary->rotation_property;
 }
 
 static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
@@ -452,9 +453,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
 					 struct drm_property *property,
 					 uint64_t val)
 {
-	struct drm_device *dev = crtc->dev;
-
-	if (omap_crtc_is_plane_prop(dev, property)) {
+	if (omap_crtc_is_plane_prop(crtc, property)) {
 		struct drm_plane_state *plane_state;
 		struct drm_plane *plane = crtc->primary;
 
@@ -479,9 +478,7 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
 					 struct drm_property *property,
 					 uint64_t *val)
 {
-	struct drm_device *dev = crtc->dev;
-
-	if (omap_crtc_is_plane_prop(dev, property)) {
+	if (omap_crtc_is_plane_prop(crtc, property)) {
 		/*
 		 * Delegate property get to the primary plane. The
 		 * drm_atomic_plane_get_property() function isn't exported, but
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e1cfba5..39c5312 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -105,7 +105,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
 
 	dispc_runtime_put();
 
-	drm_atomic_state_free(old_state);
+	drm_atomic_state_put(old_state);
 
 	/* Complete the commit, wake up any waiter. */
 	spin_lock(&priv->commit.lock);
@@ -176,6 +176,7 @@ static int omap_atomic_commit(struct drm_device *dev,
 	/* Swap the state, this is the point of no return. */
 	drm_atomic_helper_swap_state(state, true);
 
+	drm_atomic_state_get(state);
 	if (nonblock)
 		schedule_work(&commit->work);
 	else
@@ -292,16 +293,6 @@ static int omap_modeset_init_properties(struct drm_device *dev)
 {
 	struct omap_drm_private *priv = dev->dev_private;
 
-	if (priv->has_dmm) {
-		dev->mode_config.rotation_property =
-			drm_mode_create_rotation_property(dev,
-				DRM_ROTATE_0 | DRM_ROTATE_90 |
-				DRM_ROTATE_180 | DRM_ROTATE_270 |
-				DRM_REFLECT_X | DRM_REFLECT_Y);
-		if (!dev->mode_config.rotation_property)
-			return -ENOMEM;
-	}
-
 	priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
 	if (!priv->zorder_prop)
 		return -ENOMEM;
@@ -752,22 +743,32 @@ static void dev_lastclose(struct drm_device *dev)
 
 	DBG("lastclose: dev=%p", dev);
 
-	if (dev->mode_config.rotation_property) {
-		/* need to restore default rotation state.. not sure
-		 * if there is a cleaner way to restore properties to
-		 * default state?  Maybe a flag that properties should
-		 * automatically be restored to default state on
-		 * lastclose?
-		 */
-		for (i = 0; i < priv->num_crtcs; i++) {
-			drm_object_property_set_value(&priv->crtcs[i]->base,
-					dev->mode_config.rotation_property, 0);
-		}
+	/* need to restore default rotation state.. not sure
+	 * if there is a cleaner way to restore properties to
+	 * default state?  Maybe a flag that properties should
+	 * automatically be restored to default state on
+	 * lastclose?
+	 */
+	for (i = 0; i < priv->num_crtcs; i++) {
+		struct drm_crtc *crtc = priv->crtcs[i];
 
-		for (i = 0; i < priv->num_planes; i++) {
-			drm_object_property_set_value(&priv->planes[i]->base,
-					dev->mode_config.rotation_property, 0);
-		}
+		if (!crtc->primary->rotation_property)
+			continue;
+
+		drm_object_property_set_value(&crtc->base,
+					      crtc->primary->rotation_property,
+					      DRM_ROTATE_0);
+	}
+
+	for (i = 0; i < priv->num_planes; i++) {
+		struct drm_plane *plane = priv->planes[i];
+
+		if (!plane->rotation_property)
+			continue;
+
+		drm_object_property_set_value(&plane->base,
+					      plane->rotation_property,
+					      DRM_ROTATE_0);
 	}
 
 	if (priv->fbdev) {
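
The drm_atomic_state_free() -> get()/put() conversions in omapdrm (and in rcar-du, sti, tegra and tilcdc further down) follow the new reference-counted lifetime for atomic state: the core keeps its own reference, and a driver that hands the state to a deferred commit tail must take one more and drop it when the tail finishes. The resulting pattern, sketched (the example_* names are illustrative, not driver API):

	#include <drm/drm_atomic.h>
	#include <drm/drm_atomic_helper.h>

	static void example_commit_tail(struct drm_atomic_state *state)
	{
		/* ... program the hardware, wait for vblanks ... */
		drm_atomic_state_put(state);	/* drop the commit's reference */
	}

	static int example_commit(struct drm_atomic_state *state)
	{
		drm_atomic_helper_swap_state(state, true);

		/* Take a reference for the commit tail; a nonblocking
		 * driver would run it from a worker instead. */
		drm_atomic_state_get(state);
		example_commit_tail(state);
		return 0;
	}
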
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 66ac8c4..0ffd5b9 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -108,16 +108,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
 	win.src_x = state->src_x >> 16;
 	win.src_y = state->src_y >> 16;
 
-	switch (state->rotation & DRM_ROTATE_MASK) {
-	case DRM_ROTATE_90:
-	case DRM_ROTATE_270:
+	if (drm_rotation_90_or_270(state->rotation)) {
 		win.src_w = state->src_h >> 16;
 		win.src_h = state->src_w >> 16;
-		break;
-	default:
+	} else {
 		win.src_w = state->src_w >> 16;
 		win.src_h = state->src_h >> 16;
-		break;
 	}
 
 	/* update scanout: */
@@ -215,9 +211,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
 	struct omap_drm_private *priv = dev->dev_private;
 
 	if (priv->has_dmm) {
-		struct drm_property *prop = dev->mode_config.rotation_property;
+		if (!plane->rotation_property)
+			drm_plane_create_rotation_property(plane,
+							   DRM_ROTATE_0,
+							   DRM_ROTATE_0 | DRM_ROTATE_90 |
+							   DRM_ROTATE_180 | DRM_ROTATE_270 |
+							   DRM_REFLECT_X | DRM_REFLECT_Y);
 
-		drm_object_attach_property(obj, prop, 0);
+		/* Also attach the rotation property to the crtc object */
+		if (plane->rotation_property && obj != &plane->base)
+			drm_object_attach_property(obj, plane->rotation_property,
+						   DRM_ROTATE_0);
 	}
 
 	drm_object_attach_property(obj, priv->zorder_prop, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 5f3e5ad..84995eb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,7 +31,7 @@
  * Definitions taken from spice-protocol, plus kernel driver specific bits.
  */
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/workqueue.h>
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
@@ -190,7 +190,7 @@ enum {
  * spice-protocol/qxl_dev.h */
 #define QXL_MAX_RES 96
 struct qxl_release {
-	struct fence base;
+	struct dma_fence base;
 
 	int id;
 	int type;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cd83f05..50b4e52 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,7 +21,7 @@
  */
 #include "qxl_drv.h"
 #include "qxl_object.h"
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
 
 /*
  * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -40,23 +40,24 @@
 static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
 static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
 
-static const char *qxl_get_driver_name(struct fence *fence)
+static const char *qxl_get_driver_name(struct dma_fence *fence)
 {
 	return "qxl";
 }
 
-static const char *qxl_get_timeline_name(struct fence *fence)
+static const char *qxl_get_timeline_name(struct dma_fence *fence)
 {
 	return "release";
 }
 
-static bool qxl_nop_signaling(struct fence *fence)
+static bool qxl_nop_signaling(struct dma_fence *fence)
 {
 	/* fences are always automatically signaled, so just pretend we did this.. */
 	return true;
 }
 
-static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
+static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+			   signed long timeout)
 {
 	struct qxl_device *qdev;
 	struct qxl_release *release;
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
 retry:
 	sc++;
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		goto signaled;
 
 	qxl_io_notify_oom(qdev);
@@ -80,11 +81,11 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
 		if (!qxl_queue_garbage_collect(qdev, true))
 			break;
 
-		if (fence_is_signaled(fence))
+		if (dma_fence_is_signaled(fence))
 			goto signaled;
 	}
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		goto signaled;
 
 	if (have_drawable_releases || sc < 4) {
@@ -96,9 +97,9 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
 			return 0;
 
 		if (have_drawable_releases && sc > 300) {
-			FENCE_WARN(fence, "failed to wait on release %llu "
-					  "after spincount %d\n",
-					  fence->context & ~0xf0000000, sc);
+			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
+				       "after spincount %d\n",
+				       fence->context & ~0xf0000000, sc);
 			goto signaled;
 		}
 		goto retry;
@@ -115,7 +116,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
 	return end - cur;
 }
 
-static const struct fence_ops qxl_fence_ops = {
+static const struct dma_fence_ops qxl_fence_ops = {
 	.get_driver_name = qxl_get_driver_name,
 	.get_timeline_name = qxl_get_timeline_name,
 	.enable_signaling = qxl_nop_signaling,
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev,
 		WARN_ON(list_empty(&release->bos));
 		qxl_release_free_list(release);
 
-		fence_signal(&release->base);
-		fence_put(&release->base);
+		dma_fence_signal(&release->base);
+		dma_fence_put(&release->base);
 	} else {
 		qxl_release_free_list(release);
 		kfree(release);
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	 * Since we never really allocated a context and we don't want to conflict,
 	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
 	 */
-	fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
-		   release->id | 0xf0000000, release->base.seqno);
-	trace_fence_emit(&release->base);
+	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+		       release->id | 0xf0000000, release->base.seqno);
+	trace_dma_fence_emit(&release->base);
 
 	driver = bdev->driver;
 	glob = bo->glob;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index e26c82d..1176133 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
 	.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
 	.invalidate_caches = &qxl_invalidate_caches,
 	.init_mem_type = &qxl_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &qxl_evict_flags,
 	.move = &qxl_bo_move,
 	.verify_access = &qxl_verify_access,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758..fa4f8f0 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,6 +28,7 @@
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 #include "radeon_audio.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include <linux/backlight.h>
 
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index d960d39..f8b0509 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -27,6 +27,7 @@
  */
 #include <drm/drmP.h>
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "evergreend.h"
 #include "evergreen_reg_safe.h"
 #include "cayman_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b69c8de..595a197 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
 #include <linux/kernel.h>
 #include <drm/drmP.h>
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "r600d.h"
 #include "r600_reg_safe.h"
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1b0dcad..44e0c5e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -66,7 +66,7 @@
 #include <linux/kref.h>
 #include <linux/interval_tree.h>
 #include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include <ttm/ttm_bo_api.h>
 #include <ttm/ttm_bo_driver.h>
@@ -367,7 +367,7 @@ struct radeon_fence_driver {
 };
 
 struct radeon_fence {
-	struct fence		base;
+	struct dma_fence		base;
 
 	struct radeon_device	*rdev;
 	uint64_t		seq;
@@ -746,7 +746,7 @@ struct radeon_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct radeon_bo		*old_rbo;
-	struct fence			*fence;
+	struct dma_fence		*fence;
 	bool				async;
 };
 
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
 /*
  * Cast helper
  */
-extern const struct fence_ops radeon_fence_ops;
+extern const struct dma_fence_ops radeon_fence_ops;
 
-static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
 {
 	struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
 
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5df3ec7..4134759 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -29,6 +29,7 @@
 
 #include "atom.h"
 #include "atom-bits.h"
+#include "radeon_asic.h"
 
 extern void
 radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 38e396d..c1135fe 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -29,6 +29,7 @@
 #include <drm/radeon_drm.h>
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 
 /* 10 khz */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb92aef..0be8d5cd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1320,7 +1320,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
 		rdev->ring[i].idx = i;
 	}
-	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
+	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
 
 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
 		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -1651,7 +1651,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 
 	radeon_suspend(rdev);
 	radeon_hpd_fini(rdev);
-	/* evict remaining vram memory */
+	/* evict remaining vram memory
+	 * This second eviction pass moves the GART page table out of
+	 * VRAM using the CPU.
+	 */
 	radeon_bo_evict_vram(rdev);
 
 	radeon_agp_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index cdb8cb56..e7409e8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 				down_read(&rdev->exclusive_lock);
 			}
 		} else
-			r = fence_wait(work->fence, false);
+			r = dma_fence_wait(work->fence, false);
 
 		if (r)
 			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 		 * confused about which BO the CRTC is scanning out
 		 */
 
-		fence_put(work->fence);
+		dma_fence_put(work->fence);
 		work->fence = NULL;
 	}
 
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
@@ -617,7 +617,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	fence_put(work->fence);
+	dma_fence_put(work->fence);
 	kfree(work);
 	return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index de504ea..6d1237d 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+radeon_mst_best_encoder(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.hotplug = radeon_dp_mst_hotplug,
 };
 
-struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+static struct radeon_connector *
+radeon_mst_find_connector(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_connector *connector;
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
 	.commit = radeon_mst_encoder_commit,
 };
 
-void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
 	kfree(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0daad44..f65f299 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -89,13 +89,13 @@ static struct fb_ops radeonfb_ops = {
 };
 
 
-int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
 {
 	int aligned = width;
 	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
 	int pitch_mask = 0;
 
-	switch (bpp / 8) {
+	switch (cpp) {
 	case 1:
 		pitch_mask = align_large ? 255 : 127;
 		break;
@@ -110,7 +110,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile
 
 	aligned += pitch_mask;
 	aligned &= ~pitch_mask;
-	return aligned;
+	return aligned * cpp;
 }
 
 static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,13 +139,13 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 	int ret;
 	int aligned_size, size;
 	int height = mode_cmd->height;
-	u32 bpp, depth;
+	u32 cpp;
 
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+	cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
 
 	/* need to align pitch with crtc limits */
-	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
-						  fb_tiled) * ((bpp + 1) / 8);
+	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
+						  fb_tiled);
 
 	if (rdev->family >= CHIP_R600)
 		height = ALIGN(mode_cmd->height, 8);
@@ -165,11 +165,11 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 		tiling_flags = RADEON_TILING_MACRO;
 
 #ifdef __BIG_ENDIAN
-	switch (bpp) {
-	case 32:
+	switch (cpp) {
+	case 4:
 		tiling_flags |= RADEON_TILING_SWAP_32BIT;
 		break;
-	case 16:
+	case 2:
 		tiling_flags |= RADEON_TILING_SWAP_16BIT;
 	default:
 		break;
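
radeon_align_pitch() above now takes bytes per pixel (cpp) and returns the pitch in bytes, rather than returning a pixel count that every caller then scaled by ((bpp + 1) / 8) — a formula that rounds down for bpp values such as 12. A standalone model (assuming the 256-pixel alignment used for AVIVO or tiled surfaces):

	static int example_pitch_bytes(int width, int cpp)
	{
		int pitch_mask = 255;		/* alignment - 1 */
		int aligned = (width + pitch_mask) & ~pitch_mask;

		return aligned * cpp;		/* bytes, not pixels */
	}

For a 1366-pixel-wide XRGB8888 framebuffer (cpp = 4) this gives 1536 * 4 = 6144 bytes.
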
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ef075a..ef09f0a6 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev,
 	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
 	(*fence)->ring = ring;
 	(*fence)->is_vm_update = false;
-	fence_init(&(*fence)->base, &radeon_fence_ops,
-		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
+	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
+		       &rdev->fence_queue.lock,
+		       rdev->fence_context + ring,
+		       seq);
 	radeon_fence_ring_emit(rdev, ring, *fence);
 	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
 	radeon_fence_schedule_check(rdev, ring);
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
 	 */
 	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
 	if (seq >= fence->seq) {
-		int ret = fence_signal_locked(&fence->base);
+		int ret = dma_fence_signal_locked(&fence->base);
 
 		if (!ret)
-			FENCE_TRACE(&fence->base, "signaled from irq context\n");
+			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
 		else
-			FENCE_TRACE(&fence->base, "was already signaled\n");
+			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
 
 		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
 		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 	} else
-		FENCE_TRACE(&fence->base, "pending\n");
+		DMA_FENCE_TRACE(&fence->base, "pending\n");
 	return 0;
 }
 
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
 	return false;
 }
 
-static bool radeon_fence_is_signaled(struct fence *f)
+static bool radeon_fence_is_signaled(struct dma_fence *f)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
 	struct radeon_device *rdev = fence->rdev;
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f)
  * to fence_queue that checks if this fence is signaled, and if so it
  * signals the fence and removes itself.
  */
-static bool radeon_fence_enable_signaling(struct fence *f)
+static bool radeon_fence_enable_signaling(struct dma_fence *f)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
 	struct radeon_device *rdev = fence->rdev;
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f)
 	fence->fence_wake.private = NULL;
 	fence->fence_wake.func = radeon_fence_check_signaled;
 	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
-	fence_get(f);
+	dma_fence_get(f);
 
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
 	return true;
 }
 
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
 		int ret;
 
-		ret = fence_signal(&fence->base);
+		ret = dma_fence_signal(&fence->base);
 		if (!ret)
-			FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
 		return true;
 	}
 	return false;
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
 	 * exclusive_lock is not held in that case.
 	 */
 	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
-		return fence_wait(&fence->base, intr);
+		return dma_fence_wait(&fence->base, intr);
 
 	seq[fence->ring] = fence->seq;
 	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
 		return r;
 	}
 
-	r_sig = fence_signal(&fence->base);
+	r_sig = dma_fence_signal(&fence->base);
 	if (!r_sig)
-		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
 	return r;
 }
 
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
  */
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
 {
-	fence_get(&fence->base);
+	dma_fence_get(&fence->base);
 	return fence;
 }
 
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
 
 	*fence = NULL;
 	if (tmp) {
-		fence_put(&tmp->base);
+		dma_fence_put(&tmp->base);
 	}
 }
 
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
 #endif
 }
 
-static const char *radeon_fence_get_driver_name(struct fence *fence)
+static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "radeon";
 }
 
-static const char *radeon_fence_get_timeline_name(struct fence *f)
+static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
 	switch (fence->ring) {
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f)
 
 static inline bool radeon_test_signaled(struct radeon_fence *fence)
 {
-	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
 struct radeon_wait_cb {
-	struct fence_cb base;
+	struct dma_fence_cb base;
 	struct task_struct *task;
 };
 
 static void
-radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct radeon_wait_cb *wait =
 		container_of(cb, struct radeon_wait_cb, base);
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
 	wake_up_process(wait->task);
 }
 
-static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
 					     signed long t)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
 
 	cb.task = current;
 
-	if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
 		return t;
 
 	while (t > 0) {
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
 	}
 
 	__set_current_state(TASK_RUNNING);
-	fence_remove_callback(f, &cb.base);
+	dma_fence_remove_callback(f, &cb.base);
 
 	return t;
 }
 
-const struct fence_ops radeon_fence_ops = {
+const struct dma_fence_ops radeon_fence_ops = {
 	.get_driver_name = radeon_fence_get_driver_name,
 	.get_timeline_name = radeon_fence_get_timeline_name,
 	.enable_signaling = radeon_fence_enable_signaling,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index deb9511..0bcffd8 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -745,7 +745,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
 	uint32_t handle;
 	int r;
 
-	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+	args->pitch = radeon_align_pitch(rdev, args->width,
+					 DIV_ROUND_UP(args->bpp, 8), 0);
 	args->size = args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
 
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 868c3ba..222a1fa 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -27,6 +27,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include <linux/backlight.h>
 #ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b65425..326ad06 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
 static void radeon_pm_update_profile(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
 
 int radeon_pm_get_type_index(struct radeon_device *rdev,
 			     enum radeon_pm_state_type ps_type,
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
 				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
 		}
 		mutex_unlock(&rdev->pm.mutex);
+		/* allow new DPM state to be picked */
+		radeon_pm_compute_clocks_dpm(rdev);
 	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (rdev->pm.profile == PM_PROFILE_AUTO) {
 			mutex_lock(&rdev->pm.mutex);
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
 		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
 	/* balanced states don't exist at the moment */
 	if (dpm_state == POWER_STATE_TYPE_BALANCED)
-		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+		dpm_state = rdev->pm.dpm.ac_power ?
+			POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
 
 restart_search:
 	/* Pick the best power state based on current conditions */
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 02ac8a1..be5d7a3 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 		     bool shared)
 {
 	struct reservation_object_list *flist;
-	struct fence *f;
+	struct dma_fence *f;
 	struct radeon_fence *fence;
 	unsigned i;
 	int r = 0;
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 	if (fence && fence->rdev == rdev)
 		radeon_sync_fence(sync, fence);
 	else if (f)
-		r = fence_wait(f, true);
+		r = dma_fence_wait(f, true);
 
 	flist = reservation_object_get_list(resv);
 	if (shared || !flist || r)
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 		if (fence && fence->rdev == rdev)
 			radeon_sync_fence(sync, fence);
 		else
-			r = fence_wait(f, true);
+			r = dma_fence_wait(f, true);
 
 		if (r)
 			break;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e..0cf03cc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &radeon_evict_flags,
 	.move = &radeon_bo_move,
 	.verify_access = &radeon_verify_access,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0cd0e7b..d34d1cf 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 {
 	int32_t *msg, msg_type, handle;
 	unsigned img_size = 0;
-	struct fence *f;
+	struct dma_fence *f;
 	void *ptr;
 
 	int i, r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e402be8..143280d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7858,7 +7858,7 @@ static void si_program_aspm(struct radeon_device *rdev)
 	}
 }
 
-int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
 {
 	unsigned i;
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index bd9c3bb..c76ed9e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -264,7 +264,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
 
 	drm_atomic_helper_cleanup_planes(dev, old_state);
 
-	drm_atomic_state_free(old_state);
+	drm_atomic_state_put(old_state);
 
 	/* Complete the commit, wake up any waiter. */
 	spin_lock(&rcdu->commit.wait.lock);
@@ -330,6 +330,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
 	/* Swap the state, this is the point of no return. */
 	drm_atomic_helper_swap_state(state, true);
 
+	drm_atomic_state_get(state);
 	if (nonblock)
 		schedule_work(&commit->work);
 	else
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 3c58669..6f7f9c5 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -1,7 +1,6 @@
 config DRM_ROCKCHIP
 	tristate "DRM Support for Rockchip"
 	depends on DRM && ROCKCHIP_IOMMU
-	depends on RESET_CONTROLLER
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_HELPER
 	select DRM_PANEL
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8c8cbe8..6fe1611 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -20,6 +20,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/module.h>
@@ -388,7 +389,7 @@ static void rockchip_add_endpoints(struct device *dev,
 			continue;
 		}
 
-		component_match_add(dev, match, compare_of, remote);
+		drm_of_component_match_add(dev, match, compare_of, remote);
 		of_node_put(remote);
 	}
 }
@@ -437,7 +438,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
 		}
 
 		of_node_put(iommu);
-		component_match_add(dev, &match, compare_of, port->parent);
+		drm_of_component_match_add(dev, &match, compare_of,
+					   port->parent);
 		of_node_put(port);
 	}
 
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 3dc0d8f..2db89be 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1004,6 +1004,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 		kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
 		if (IS_ERR(kvb_addr)) {
 			ret = PTR_ERR(kvb_addr);
+			kvb_addr = NULL;
 			goto done;
 		}
 		cmdbuf->vb_addr = kvb_addr;
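
The one-line savage fix above matters because of the shared exit label: memdup_user() returns an ERR_PTR() on failure and the "done" path kfree()s the buffers, so the pointer must be cleared before jumping there or the cleanup would free a bogus address. The pattern, isolated into a self-contained sketch:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int example_copy_in(const void __user *user_ptr, size_t len)
	{
		void *buf;
		int ret = 0;

		buf = memdup_user(user_ptr, len);
		if (IS_ERR(buf)) {
			ret = PTR_ERR(buf);
			buf = NULL;	/* so the unwind below frees nothing */
			goto done;
		}
		/* ... use buf ... */
	done:
		kfree(buf);		/* kfree(NULL) is a no-op */
		return ret;
	}
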
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 2784919..6aead20 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -17,6 +17,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_of.h>
 
 #include "sti_crtc.h"
 #include "sti_drv.h"
@@ -184,7 +185,7 @@ static void sti_atomic_complete(struct sti_private *private,
 	drm_atomic_helper_wait_for_vblanks(drm, state);
 
 	drm_atomic_helper_cleanup_planes(drm, state);
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 }
 
 static void sti_atomic_work(struct work_struct *work)
@@ -217,6 +218,7 @@ static int sti_atomic_commit(struct drm_device *drm,
 
 	drm_atomic_helper_swap_state(state, true);
 
+	drm_atomic_state_get(state);
 	if (nonblock)
 		sti_atomic_schedule(private, state);
 	else
@@ -423,8 +425,8 @@ static int sti_platform_probe(struct platform_device *pdev)
 	child_np = of_get_next_available_child(node, NULL);
 
 	while (child_np) {
-		component_match_add(dev, &match, compare_of, child_np);
-		of_node_put(child_np);
+		drm_of_component_match_add(dev, &match, compare_of,
+					   child_np);
 		child_np = of_get_next_available_child(node, child_np);
 	}
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 0da9862..b3c4ad6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -18,6 +18,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_of.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_drv.h"
@@ -239,7 +240,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
 		/* Add current component */
 		DRM_DEBUG_DRIVER("Adding component %s\n",
 				 of_node_full_name(node));
-		component_match_add(dev, match, compare_of, node);
+		drm_of_component_match_add(dev, match, compare_of, node);
 		count++;
 	}
 
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 63ebb15..bbf5a4b 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -3,7 +3,6 @@
 	depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
 	depends on COMMON_CLK
 	depends on DRM
-	depends on RESET_CONTROLLER
 	select DRM_KMS_HELPER
 	select DRM_MIPI_DSI
 	select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8ab47b5..a9630c2 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -63,7 +63,7 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
 	drm_atomic_helper_wait_for_vblanks(drm, state);
 
 	drm_atomic_helper_cleanup_planes(drm, state);
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 }
 
 static void tegra_atomic_work(struct work_struct *work)
@@ -96,6 +96,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
 
 	drm_atomic_helper_swap_state(state, true);
 
+	drm_atomic_state_get(state);
 	if (nonblock)
 		tegra_atomic_schedule(tegra, state);
 	else
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 52ebe8f..822531e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -72,16 +72,14 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_gem_cma_object *gem;
-	unsigned int depth, bpp;
 	dma_addr_t start, end;
 	u64 dma_base_and_ceiling;
 
-	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
 	gem = drm_fb_cma_get_gem_obj(fb, 0);
 
 	start = gem->paddr + fb->offsets[0] +
 		crtc->y * fb->pitches[0] +
-		crtc->x * bpp / 8;
+		crtc->x * drm_format_plane_cpp(fb->pixel_format, 0);
 
 	end = start + (crtc->mode.vdisplay * fb->pitches[0]);
 
@@ -461,16 +459,16 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	if (info->tft_alt_mode)
 		reg |= LCDC_TFT_ALT_ENABLE;
 	if (priv->rev == 2) {
-		unsigned int depth, bpp;
-
-		drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
-		switch (bpp) {
-		case 16:
+		switch (fb->pixel_format) {
+		case DRM_FORMAT_BGR565:
+		case DRM_FORMAT_RGB565:
 			break;
-		case 32:
+		case DRM_FORMAT_XBGR8888:
+		case DRM_FORMAT_XRGB8888:
 			reg |= LCDC_V2_TFT_24BPP_UNPACK;
 			/* fallthrough */
-		case 24:
+		case DRM_FORMAT_BGR888:
+		case DRM_FORMAT_RGB888:
 			reg |= LCDC_V2_TFT_24BPP_MODE;
 			break;
 		default:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index a694977..147fb28 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -143,8 +143,6 @@ static int tilcdc_commit(struct drm_device *dev,
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
-	drm_atomic_state_free(state);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 68e8950..06a4c58 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
 
 #include <linux/component.h>
 #include <linux/of_graph.h>
+#include <drm/drm_of.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_external.h"
@@ -160,7 +161,8 @@ int tilcdc_get_external_components(struct device *dev,
 
 		dev_dbg(dev, "Subdevice node '%s' found\n", node->name);
 		if (match)
-			component_match_add(dev, match, dev_match_of, node);
+			drm_of_component_match_add(dev, match, dev_match_of,
+						   node);
 		of_node_put(node);
 		count++;
 	}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 74c65fa..8a6a50d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -39,7 +39,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
 {
 	struct drm_crtc_state *crtc_state;
 	struct drm_plane_state *old_state = plane->state;
-	unsigned int depth, bpp;
+	unsigned int pitch;
 
 	if (!state->crtc)
 		return 0;
@@ -68,8 +68,9 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
 		return -EINVAL;
 	}
 
-	drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
-	if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
+	pitch = crtc_state->mode.hdisplay *
+		drm_format_plane_cpp(state->fb->pixel_format, 0);
+	if (state->fb->pitches[0] != pitch) {
 		dev_err(plane->dev->dev,
 			"Invalid pitch: fb and crtc widths must be the same");
 		return -EINVAL;
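
The tilcdc conversions above replace drm_fb_get_bpp_depth() with drm_format_plane_cpp(), which answers the question these call sites actually ask: how many bytes one pixel of plane 0 occupies. A sketch of the scanout arithmetic it enables (the same computation as set_scanout() above):

	#include <drm/drm_fourcc.h>

	static u32 example_scanout_offset(u32 pixel_format, int x, int y,
					  u32 pitch)
	{
		/* byte offset of pixel (x, y) within plane 0 */
		return y * pitch + x * drm_format_plane_cpp(pixel_format, 0);
	}
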
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217d..f6ff579 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	BUG_ON(!list_empty(&bo->ddestroy));
 	ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
-	fence_put(bo->moving);
+	dma_fence_put(bo->moving);
 	if (bo->resv == &bo->ttm_resv)
 		reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i;
 
 	fobj = reservation_object_get_list(bo->resv);
 	fence = reservation_object_get_excl(bo->resv);
 	if (fence && !fence->ops->signaled)
-		fence_enable_sw_signaling(fence);
+		dma_fence_enable_sw_signaling(fence);
 
 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
 		fence = rcu_dereference_protected(fobj->shared[i],
 					reservation_object_held(bo->resv));
 
 		if (!fence->ops->signaled)
-			fence_enable_sw_signaling(fence);
+			dma_fence_enable_sw_signaling(fence);
 	}
 }
 
@@ -717,6 +717,20 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	return ret;
 }
 
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+			      const struct ttm_place *place)
+{
+	/* Don't evict this BO if it's outside of the
+	 * requested placement range
+	 */
+	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+	    (place->lpfn && place->lpfn <= bo->mem.start))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
 				const struct ttm_place *place,
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
 		ret = __ttm_bo_reserve(bo, false, true, NULL);
-		if (!ret) {
-			if (place && (place->fpfn || place->lpfn)) {
-				/* Don't evict this BO if it's outside of the
-				 * requested placement range
-				 */
-				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
-				    (place->lpfn && place->lpfn <= bo->mem.start)) {
-					__ttm_bo_unreserve(bo);
-					ret = -EBUSY;
-					continue;
-				}
-			}
+		if (ret)
+			continue;
 
-			break;
+		if (place && !bdev->driver->eviction_valuable(bo, place)) {
+			__ttm_bo_unreserve(bo);
+			ret = -EBUSY;
+			continue;
 		}
+
+		break;
 	}
 
 	if (ret) {
@@ -792,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_mem_type_manager *man,
 				 struct ttm_mem_reg *mem)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret;
 
 	spin_lock(&man->move_lock);
-	fence = fence_get(man->move);
+	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
 	if (fence) {
@@ -806,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			return ret;
 
-		fence_put(bo->moving);
+		dma_fence_put(bo->moving);
 		bo->moving = fence;
 	}
 
@@ -1286,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret;
 
 	/*
@@ -1309,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_unlock(&glob->lru_lock);
 
 	spin_lock(&man->move_lock);
-	fence = fence_get(man->move);
+	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
 	if (fence) {
-		ret = fence_wait(fence, false);
-		fence_put(fence);
+		ret = dma_fence_wait(fence, false);
+		dma_fence_put(fence);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1343,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		       mem_type);
 		return ret;
 	}
-	fence_put(man->move);
+	dma_fence_put(man->move);
 
 	man->use_type = false;
 	man->has_type = false;
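
ttm_bo_eviction_valuable() is exported precisely so drivers can layer their own policy on top of the default range test via the new &ttm_bo_driver.eviction_valuable hook; a hypothetical override (struct foo_bo and its pin_count are invented for illustration):

/*
 * Hypothetical driver hook: keep the default placement-range check but
 * additionally refuse to evict pinned buffers. Nothing here is part of
 * this series except the exported default it falls back to.
 */
struct foo_bo {
	struct ttm_buffer_object tbo;
	unsigned int pin_count;
};

static bool foo_bo_eviction_valuable(struct ttm_buffer_object *bo,
				     const struct ttm_place *place)
{
	struct foo_bo *fbo = container_of(bo, struct foo_bo, tbo);

	if (fbo->pin_count)
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}
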
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e216..d0459b3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-			      struct fence *fence,
+			      struct dma_fence *fence,
 			      bool evict,
 			      struct ttm_mem_reg *new_mem)
 {
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		 * operation has completed.
 		 */
 
-		fence_put(bo->moving);
-		bo->moving = fence_get(fence);
+		dma_fence_put(bo->moving);
+		bo->moving = dma_fence_get(fence);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
 int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct fence *fence, bool evict,
+			 struct dma_fence *fence, bool evict,
 			 struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		 * operation has completed.
 		 */
 
-		fence_put(bo->moving);
-		bo->moving = fence_get(fence);
+		dma_fence_put(bo->moving);
+		bo->moving = dma_fence_get(fence);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		 */
 
 		spin_lock(&from->move_lock);
-		if (!from->move || fence_is_later(fence, from->move)) {
-			fence_put(from->move);
-			from->move = fence_get(fence);
+		if (!from->move || dma_fence_is_later(fence, from->move)) {
+			dma_fence_put(from->move);
+			from->move = dma_fence_get(fence);
 		}
 		spin_unlock(&from->move_lock);
 
 		ttm_bo_free_old_node(bo);
 
-		fence_put(bo->moving);
-		bo->moving = fence_get(fence);
+		dma_fence_put(bo->moving);
+		bo->moving = dma_fence_get(fence);
 
 	} else {
 		/**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a6ed9d5..4748aed 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	/*
 	 * Quick non-stalling check for idle.
 	 */
-	if (fence_is_signaled(bo->moving))
+	if (dma_fence_is_signaled(bo->moving))
 		goto out_clear;
 
 	/*
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;
 
 		up_read(&vma->vm_mm->mmap_sem);
-		(void) fence_wait(bo->moving, true);
+		(void) dma_fence_wait(bo->moving, true);
 		goto out_unlock;
 	}
 
 	/*
 	 * Ordinary wait.
 	 */
-	ret = fence_wait(bo->moving, true);
+	ret = dma_fence_wait(bo->moving, true);
 	if (unlikely(ret != 0)) {
 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	}
 
 out_clear:
-	fence_put(bo->moving);
+	dma_fence_put(bo->moving);
 	bo->moving = NULL;
 
 out_unlock:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index a80717b..d35bc49 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
 void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
-				 struct list_head *list, struct fence *fence)
+				 struct list_head *list,
+				 struct dma_fence *fence)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c1f65c6..f31f72a 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -61,7 +61,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
-	drm_atomic_state_free(state);
+	drm_atomic_state_put(state);
 
 	up(&vc4->async_modeset);
 
@@ -173,6 +173,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
 	 * current layout.
 	 */
 
+	drm_atomic_state_get(state);
 	if (nonblock) {
 		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
 				   vc4_atomic_complete_commit_seqno_cb);
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 5c57c1f..488909a 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -28,56 +28,57 @@
 #define VGEM_FENCE_TIMEOUT (10*HZ)
 
 struct vgem_fence {
-	struct fence base;
+	struct dma_fence base;
 	struct spinlock lock;
 	struct timer_list timer;
 };
 
-static const char *vgem_fence_get_driver_name(struct fence *fence)
+static const char *vgem_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "vgem";
 }
 
-static const char *vgem_fence_get_timeline_name(struct fence *fence)
+static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
 {
 	return "unbound";
 }
 
-static bool vgem_fence_signaled(struct fence *fence)
+static bool vgem_fence_signaled(struct dma_fence *fence)
 {
 	return false;
 }
 
-static bool vgem_fence_enable_signaling(struct fence *fence)
+static bool vgem_fence_enable_signaling(struct dma_fence *fence)
 {
 	return true;
 }
 
-static void vgem_fence_release(struct fence *base)
+static void vgem_fence_release(struct dma_fence *base)
 {
 	struct vgem_fence *fence = container_of(base, typeof(*fence), base);
 
 	del_timer_sync(&fence->timer);
-	fence_free(&fence->base);
+	dma_fence_free(&fence->base);
 }
 
-static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
 {
 	snprintf(str, size, "%u", fence->seqno);
 }
 
-static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
 					  int size)
 {
-	snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+	snprintf(str, size, "%u",
+		 dma_fence_is_signaled(fence) ? fence->seqno : 0);
 }
 
-static const struct fence_ops vgem_fence_ops = {
+static const struct dma_fence_ops vgem_fence_ops = {
 	.get_driver_name = vgem_fence_get_driver_name,
 	.get_timeline_name = vgem_fence_get_timeline_name,
 	.enable_signaling = vgem_fence_enable_signaling,
 	.signaled = vgem_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = vgem_fence_release,
 
 	.fence_value_str = vgem_fence_value_str,
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data)
 {
 	struct vgem_fence *fence = (struct vgem_fence *)data;
 
-	fence_signal(&fence->base);
+	dma_fence_signal(&fence->base);
 }
 
-static struct fence *vgem_fence_create(struct vgem_file *vfile,
-				       unsigned int flags)
+static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
+					   unsigned int flags)
 {
 	struct vgem_fence *fence;
 
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile,
 		return NULL;
 
 	spin_lock_init(&fence->lock);
-	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
-		   fence_context_alloc(1), 1);
+	dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+		       dma_fence_context_alloc(1), 1);
 
 	setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
 
@@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	struct vgem_file *vfile = file->driver_priv;
 	struct reservation_object *resv;
 	struct drm_gem_object *obj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret;
 
 	if (arg->flags & ~VGEM_FENCE_WRITE)
@@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	}
 err_fence:
 	if (ret) {
-		fence_signal(fence);
-		fence_put(fence);
+		dma_fence_signal(fence);
+		dma_fence_put(fence);
 	}
 err:
 	drm_gem_object_unreference_unlocked(obj);
@@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
 {
 	struct vgem_file *vfile = file->driver_priv;
 	struct drm_vgem_fence_signal *arg = data;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret = 0;
 
 	if (arg->flags)
@@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		ret = -ETIMEDOUT;
 
-	fence_signal(fence);
-	fence_put(fence);
+	dma_fence_signal(fence);
+	dma_fence_put(fence);
 	return ret;
 }
 
@@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile)
 
 static int __vgem_fence_idr_fini(int id, void *p, void *data)
 {
-	fence_signal(p);
-	fence_put(p);
+	dma_fence_signal(p);
+	dma_fence_put(p);
 	return 0;
 }
 
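
The fence rename throughout this file is mechanical; for reference, the lifecycle calls keep their old pairing under the new names. A condensed sketch, where my_fence_ops stands in for an ops table like vgem's above:

/*
 * Sketch of the dma_fence lifecycle after the rename. my_fence_ops is
 * an assumed dma_fence_ops table; with no .release callback the core
 * frees the fence through kfree_rcu(), so a bare kzalloc is fine here.
 */
#include <linux/dma-fence.h>
#include <linux/slab.h>

extern const struct dma_fence_ops my_fence_ops;	/* assumed */

static void my_fence_demo(spinlock_t *lock)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return;

	dma_fence_init(f, &my_fence_ops, lock,
		       dma_fence_context_alloc(1), 1);
	dma_fence_signal(f);	/* was fence_signal() */
	dma_fence_put(f);	/* was fence_put() */
}
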
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index e1afc3d..81d1807 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,10 +1,10 @@
 config DRM_VIRTIO_GPU
 	tristate "Virtio GPU driver"
 	depends on DRM && VIRTIO
-        select DRM_KMS_HELPER
-        select DRM_TTM
+	select DRM_KMS_HELPER
+	select DRM_TTM
 	help
 	   This is the virtual GPU driver for virtio.  It can be used with
-           QEMU based VMMs (like KVM or Xen).
+	   QEMU based VMMs (like KVM or Xen).
 
 	   If unsure say M.
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ae59080..ec1ebdc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -82,7 +82,7 @@ struct virtio_gpu_fence_driver {
 };
 
 struct virtio_gpu_fence {
-	struct fence f;
+	struct dma_fence f;
 	struct virtio_gpu_fence_driver *drv;
 	struct list_head node;
 	uint64_t seq;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f3f70fa..2335352 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -26,22 +26,22 @@
 #include <drm/drmP.h>
 #include "virtgpu_drv.h"
 
-static const char *virtio_get_driver_name(struct fence *f)
+static const char *virtio_get_driver_name(struct dma_fence *f)
 {
 	return "virtio_gpu";
 }
 
-static const char *virtio_get_timeline_name(struct fence *f)
+static const char *virtio_get_timeline_name(struct dma_fence *f)
 {
 	return "controlq";
 }
 
-static bool virtio_enable_signaling(struct fence *f)
+static bool virtio_enable_signaling(struct dma_fence *f)
 {
 	return true;
 }
 
-static bool virtio_signaled(struct fence *f)
+static bool virtio_signaled(struct dma_fence *f)
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f)
 	return false;
 }
 
-static void virtio_fence_value_str(struct fence *f, char *str, int size)
+static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
 	snprintf(str, size, "%llu", fence->seq);
 }
 
-static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
 	snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
 }
 
-static const struct fence_ops virtio_fence_ops = {
+static const struct dma_fence_ops virtio_fence_ops = {
 	.get_driver_name     = virtio_get_driver_name,
 	.get_timeline_name   = virtio_get_timeline_name,
 	.enable_signaling    = virtio_enable_signaling,
 	.signaled            = virtio_signaled,
-	.wait                = fence_default_wait,
+	.wait                = dma_fence_default_wait,
 	.fence_value_str     = virtio_fence_value_str,
 	.timeline_value_str  = virtio_timeline_value_str,
 };
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 	spin_lock_irqsave(&drv->lock, irq_flags);
 	(*fence)->drv = drv;
 	(*fence)->seq = ++drv->sync_seq;
-	fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
-		   drv->context, (*fence)->seq);
-	fence_get(&(*fence)->f);
+	dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+		       drv->context, (*fence)->seq);
+	dma_fence_get(&(*fence)->f);
 	list_add_tail(&(*fence)->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
 
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
 	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
 		if (last_seq < fence->seq)
 			continue;
-		fence_signal_locked(&fence->f);
+		dma_fence_signal_locked(&fence->f);
 		list_del(&fence->node);
-		fence_put(&fence->f);
+		dma_fence_put(&fence->f);
 	}
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b..61f3a96 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	/* fence the command bo */
 	virtio_gpu_unref_list(&validate_list);
 	drm_free_large(buflist);
-	fence_put(&fence->f);
+	dma_fence_put(&fence->f);
 	return 0;
 
 out_unresv:
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		drm_gem_object_release(obj);
 		if (vgdev->has_virgl_3d) {
 			virtio_gpu_unref_list(&validate_list);
-			fence_put(&fence->f);
+			dma_fence_put(&fence->f);
 		}
 		return ret;
 	}
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 
 	if (vgdev->has_virgl_3d) {
 		virtio_gpu_unref_list(&validate_list);
-		fence_put(&fence->f);
+		dma_fence_put(&fence->f);
 	}
 	return 0;
 fail_unref:
 	if (vgdev->has_virgl_3d) {
 		virtio_gpu_unref_list(&validate_list);
-		fence_put(&fence->f);
+		dma_fence_put(&fence->f);
 	}
 //fail_obj:
 //	drm_gem_object_handle_unreference_unlocked(obj);
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	reservation_object_add_excl_fence(qobj->tbo.resv,
 					  &fence->f);
 
-	fence_put(&fence->f);
+	dma_fence_put(&fence->f);
 out_unres:
 	virtio_gpu_object_unreserve(qobj);
 out:
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			 args->level, &box, &fence);
 		reservation_object_add_excl_fence(qobj->tbo.resv,
 						  &fence->f);
-		fence_put(&fence->f);
+		dma_fence_put(&fence->f);
 	}
 
 out_unres:
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 036b0fb..1235519 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
 	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
 
-	vgdev->fence_drv.context = fence_context_alloc(1);
+	vgdev->fence_drv.context = dma_fence_context_alloc(1);
 	spin_lock_init(&vgdev->fence_drv.lock);
 	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
 	INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ba28c0f..cb75f06 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 		if (!ret) {
 			reservation_object_add_excl_fence(bo->tbo.resv,
 							  &fence->f);
-			fence_put(&fence->f);
+			dma_fence_put(&fence->f);
 			fence = NULL;
 			virtio_gpu_object_unreserve(bo);
 			virtio_gpu_object_wait(bo, false);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 80482ac..4a1de9f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
 	.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
 	.invalidate_caches = &virtio_gpu_invalidate_caches,
 	.init_mem_type = &virtio_gpu_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &virtio_gpu_evict_flags,
 	.move = &virtio_gpu_bo_move,
 	.verify_access = &virtio_gpu_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 78b75ee..c894a48 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
 	.invalidate_caches = vmw_invalidate_caches,
 	.init_mem_type = vmw_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = vmw_evict_flags,
 	.move = NULL,
 	.verify_access = vmw_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e8..6541dd8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
  * objects with actions attached to them.
  */
 
-static void vmw_fence_obj_destroy(struct fence *f)
+static void vmw_fence_obj_destroy(struct dma_fence *f)
 {
 	struct vmw_fence_obj *fence =
 		container_of(f, struct vmw_fence_obj, base);
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f)
 	fence->destroy(fence);
 }
 
-static const char *vmw_fence_get_driver_name(struct fence *f)
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
 {
 	return "vmwgfx";
 }
 
-static const char *vmw_fence_get_timeline_name(struct fence *f)
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
 {
 	return "svga";
 }
 
-static bool vmw_fence_enable_signaling(struct fence *f)
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
 {
 	struct vmw_fence_obj *fence =
 		container_of(f, struct vmw_fence_obj, base);
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f)
 }
 
 struct vmwgfx_wait_cb {
-	struct fence_cb base;
+	struct dma_fence_cb base;
 	struct task_struct *task;
 };
 
 static void
-vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct vmwgfx_wait_cb *wait =
 		container_of(cb, struct vmwgfx_wait_cb, base);
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
 
 static void __vmw_fences_update(struct vmw_fence_manager *fman);
 
-static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
 {
 	struct vmw_fence_obj *fence =
 		container_of(f, struct vmw_fence_obj, base);
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
 
 	while (ret > 0) {
 		__vmw_fences_update(fman);
-		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
 			break;
 
 		if (intr)
@@ -225,7 +225,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
 	return ret;
 }
 
-static struct fence_ops vmw_fence_ops = {
+static struct dma_fence_ops vmw_fence_ops = {
 	.get_driver_name = vmw_fence_get_driver_name,
 	.get_timeline_name = vmw_fence_get_timeline_name,
 	.enable_signaling = vmw_fence_enable_signaling,
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 	fman->event_fence_action_size =
 		ttm_round_pot(sizeof(struct vmw_event_fence_action));
 	mutex_init(&fman->goal_irq_mutex);
-	fman->ctx = fence_context_alloc(1);
+	fman->ctx = dma_fence_context_alloc(1);
 
 	return fman;
 }
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
 	unsigned long irq_flags;
 	int ret = 0;
 
-	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
-		   fman->ctx, seqno);
+	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+		       fman->ctx, seqno);
 	INIT_LIST_HEAD(&fence->seq_passed_actions);
 	fence->destroy = destroy;
 
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 	u32 goal_seqno;
 	u32 *fifo_mem;
 
-	if (fence_is_signaled_locked(&fence->base))
+	if (dma_fence_is_signaled_locked(&fence->base))
 		return false;
 
 	fifo_mem = fman->dev_priv->mmio_virt;
@@ -459,7 +459,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
 	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
 		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
 			list_del_init(&fence->head);
-			fence_signal_locked(&fence->base);
+			dma_fence_signal_locked(&fence->base);
 			INIT_LIST_HEAD(&action_list);
 			list_splice_init(&fence->seq_passed_actions,
 					 &action_list);
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 		return 1;
 
 	vmw_fences_update(fman);
 
-	return fence_is_signaled(&fence->base);
+	return dma_fence_is_signaled(&fence->base);
 }
 
 int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
 		       bool interruptible, unsigned long timeout)
 {
-	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
+	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
 
 	if (likely(ret > 0))
 		return 0;
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
-	fence_free(&fence->base);
+	dma_fence_free(&fence->base);
 }
 
 int vmw_fence_create(struct vmw_fence_manager *fman,
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 		struct vmw_fence_obj *fence =
 			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
 				   head);
-		fence_get(&fence->base);
+		dma_fence_get(&fence->base);
 		spin_unlock_irq(&fman->lock);
 
 		ret = vmw_fence_obj_wait(fence, false, false,
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 
 		if (unlikely(ret != 0)) {
 			list_del_init(&fence->head);
-			fence_signal(&fence->base);
+			dma_fence_signal(&fence->base);
 			INIT_LIST_HEAD(&action_list);
 			list_splice_init(&fence->seq_passed_actions,
 					 &action_list);
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 		}
 
 		BUG_ON(!list_empty(&fence->head));
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 		spin_lock_irq(&fman->lock);
 	}
 	spin_unlock_irq(&fman->lock);
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 	spin_lock_irqsave(&fman->lock, irq_flags);
 
 	fman->pending_actions[action->type]++;
-	if (fence_is_signaled_locked(&fence->base)) {
+	if (dma_fence_is_signaled_locked(&fence->base)) {
 		struct list_head action_list;
 
 		INIT_LIST_HEAD(&action_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 83ae301..d9d85aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,7 +27,7 @@
 
 #ifndef _VMWGFX_FENCE_H_
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
 
@@ -52,7 +52,7 @@ struct vmw_fence_action {
 };
 
 struct vmw_fence_obj {
-	struct fence base;
+	struct dma_fence base;
 
 	struct list_head head;
 	struct list_head seq_passed_actions;
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
 
 	*fence_p = NULL;
 	if (fence)
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 }
 
 static inline struct vmw_fence_obj *
 vmw_fence_obj_reference(struct vmw_fence_obj *fence)
 {
 	if (fence)
-		fence_get(&fence->base);
+		dma_fence_get(&fence->base);
 	return fence;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bf28ccc..c965514 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -980,14 +980,22 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	struct vmw_dma_buffer *bo = NULL;
 	struct ttm_base_object *user_obj;
 	struct drm_mode_fb_cmd mode_cmd;
+	const struct drm_format_info *info;
 	int ret;
 
+	info = drm_format_info(mode_cmd2->pixel_format);
+	if (!info || !info->depth) {
+		DRM_ERROR("Unsupported framebuffer format %s\n",
+			  drm_get_format_name(mode_cmd2->pixel_format));
+		return ERR_PTR(-EINVAL);
+	}
+
 	mode_cmd.width = mode_cmd2->width;
 	mode_cmd.height = mode_cmd2->height;
 	mode_cmd.pitch = mode_cmd2->pitches[0];
 	mode_cmd.handle = mode_cmd2->handles[0];
-	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
-				    &mode_cmd.bpp);
+	mode_cmd.depth = info->depth;
+	mode_cmd.bpp = info->cpp[0] * 8;
 
 	/**
 	 * This code should be conditioned on Screen Objects not being used.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 52ca1c9..8e86d6d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -575,7 +575,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 		long lret;
 
 		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
-					nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+							   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
 			return -EBUSY;
 		else if (lret < 0)
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 	if (fence == NULL) {
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 		reservation_object_add_excl_fence(bo->resv, &fence->base);
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 	} else
 		reservation_object_add_excl_fence(bo->resv, &fence->base);
 }
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index aefdff9..08766c6 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,6 @@
 config IMX_IPUV3_CORE
 	tristate "IPUv3 core support"
 	depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
-	depends on RESET_CONTROLLER
 	select GENERIC_IRQ_CHIP
 	help
 	  Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1887f19..77657a8 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1022,21 +1022,16 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
 
 	unsigned int io_state;
 
-	char *kbuf, *curr_pos;
+	char kbuf[64], *curr_pos;
 	size_t remaining = count;
 
 	int ret_val;
 	int i;
 
-
-	kbuf = kmalloc(count + 1, GFP_KERNEL);
-	if (!kbuf)
-		return -ENOMEM;
-
-	if (copy_from_user(kbuf, buf, count)) {
-		kfree(kbuf);
+	if (count >= sizeof(kbuf))
+		return -EINVAL;
+	if (copy_from_user(kbuf, buf, count))
 		return -EFAULT;
-	}
 	curr_pos = kbuf;
 	kbuf[count] = '\0';	/* Just to make sure... */
 
@@ -1259,11 +1254,9 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
 		goto done;
 	}
 	/* If we got here, the message written is not part of the protocol! */
-	kfree(kbuf);
 	return -EPROTO;
 
 done:
-	kfree(kbuf);
 	return ret_val;
 }
 
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 1626892..1cf907e 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -533,6 +533,10 @@ hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect)
 		return "4:3";
 	case HDMI_PICTURE_ASPECT_16_9:
 		return "16:9";
+	case HDMI_PICTURE_ASPECT_64_27:
+		return "64:27";
+	case HDMI_PICTURE_ASPECT_256_135:
+		return "256:135";
 	case HDMI_PICTURE_ASPECT_RESERVED:
 		return "Reserved";
 	}
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
new file mode 100644
index 0000000..3629b27
--- /dev/null
+++ b/include/drm/bridge/mhl.h
@@ -0,0 +1,291 @@
+/*
+ * Defines for Mobile High-Definition Link (MHL) interface
+ *
+ * Copyright (C) 2015, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MHL_H__
+#define __MHL_H__
+
+/* Device Capabilities Registers */
+enum {
+	MHL_DCAP_DEV_STATE,
+	MHL_DCAP_MHL_VERSION,
+	MHL_DCAP_CAT,
+	MHL_DCAP_ADOPTER_ID_H,
+	MHL_DCAP_ADOPTER_ID_L,
+	MHL_DCAP_VID_LINK_MODE,
+	MHL_DCAP_AUD_LINK_MODE,
+	MHL_DCAP_VIDEO_TYPE,
+	MHL_DCAP_LOG_DEV_MAP,
+	MHL_DCAP_BANDWIDTH,
+	MHL_DCAP_FEATURE_FLAG,
+	MHL_DCAP_DEVICE_ID_H,
+	MHL_DCAP_DEVICE_ID_L,
+	MHL_DCAP_SCRATCHPAD_SIZE,
+	MHL_DCAP_INT_STAT_SIZE,
+	MHL_DCAP_RESERVED,
+	MHL_DCAP_SIZE
+};
+
+#define MHL_DCAP_CAT_SINK			0x01
+#define MHL_DCAP_CAT_SOURCE			0x02
+#define MHL_DCAP_CAT_POWER			0x10
+#define MHL_DCAP_CAT_PLIM(x)			((x) << 5)
+
+#define MHL_DCAP_VID_LINK_RGB444		0x01
+#define MHL_DCAP_VID_LINK_YCBCR444		0x02
+#define MHL_DCAP_VID_LINK_YCBCR422		0x04
+#define MHL_DCAP_VID_LINK_PPIXEL		0x08
+#define MHL_DCAP_VID_LINK_ISLANDS		0x10
+#define MHL_DCAP_VID_LINK_VGA			0x20
+#define MHL_DCAP_VID_LINK_16BPP			0x40
+
+#define MHL_DCAP_AUD_LINK_2CH			0x01
+#define MHL_DCAP_AUD_LINK_8CH			0x02
+
+#define MHL_DCAP_VT_GRAPHICS			0x00
+#define MHL_DCAP_VT_PHOTO			0x02
+#define MHL_DCAP_VT_CINEMA			0x04
+#define MHL_DCAP_VT_GAMES			0x08
+#define MHL_DCAP_SUPP_VT			0x80
+
+#define MHL_DCAP_LD_DISPLAY			0x01
+#define MHL_DCAP_LD_VIDEO			0x02
+#define MHL_DCAP_LD_AUDIO			0x04
+#define MHL_DCAP_LD_MEDIA			0x08
+#define MHL_DCAP_LD_TUNER			0x10
+#define MHL_DCAP_LD_RECORD			0x20
+#define MHL_DCAP_LD_SPEAKER			0x40
+#define MHL_DCAP_LD_GUI				0x80
+#define MHL_DCAP_LD_ALL				0xFF
+
+#define MHL_DCAP_FEATURE_RCP_SUPPORT		0x01
+#define MHL_DCAP_FEATURE_RAP_SUPPORT		0x02
+#define MHL_DCAP_FEATURE_SP_SUPPORT		0x04
+#define MHL_DCAP_FEATURE_UCP_SEND_SUPPORT	0x08
+#define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT	0x10
+#define MHL_DCAP_FEATURE_RBP_SUPPORT		0x40
+
+/* Extended Device Capabilities Registers */
+enum {
+	MHL_XDC_ECBUS_SPEEDS,
+	MHL_XDC_TMDS_SPEEDS,
+	MHL_XDC_ECBUS_ROLES,
+	MHL_XDC_LOG_DEV_MAPX,
+	MHL_XDC_SIZE
+};
+
+#define MHL_XDC_ECBUS_S_075			0x01
+#define MHL_XDC_ECBUS_S_8BIT			0x02
+#define MHL_XDC_ECBUS_S_12BIT			0x04
+#define MHL_XDC_ECBUS_D_150			0x10
+#define MHL_XDC_ECBUS_D_8BIT			0x20
+
+#define MHL_XDC_TMDS_000			0x00
+#define MHL_XDC_TMDS_150			0x01
+#define MHL_XDC_TMDS_300			0x02
+#define MHL_XDC_TMDS_600			0x04
+
+/* MHL_XDC_ECBUS_ROLES flags */
+#define MHL_XDC_DEV_HOST			0x01
+#define MHL_XDC_DEV_DEVICE			0x02
+#define MHL_XDC_DEV_CHARGER			0x04
+#define MHL_XDC_HID_HOST			0x08
+#define MHL_XDC_HID_DEVICE			0x10
+
+/* MHL_XDC_LOG_DEV_MAPX flags */
+#define MHL_XDC_LD_PHONE			0x01
+
+/* Device Status Registers */
+enum {
+	MHL_DST_CONNECTED_RDY,
+	MHL_DST_LINK_MODE,
+	MHL_DST_VERSION,
+	MHL_DST_SIZE
+};
+
+/* Offset of DEVSTAT registers */
+#define MHL_DST_OFFSET				0x30
+#define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name)
+
+#define MHL_DST_CONN_DCAP_RDY			0x01
+#define MHL_DST_CONN_XDEVCAPP_SUPP		0x02
+#define MHL_DST_CONN_POW_STAT			0x04
+#define MHL_DST_CONN_PLIM_STAT_MASK		0x38
+
+#define MHL_DST_LM_CLK_MODE_MASK		0x07
+#define MHL_DST_LM_CLK_MODE_PACKED_PIXEL	0x02
+#define MHL_DST_LM_CLK_MODE_NORMAL		0x03
+#define MHL_DST_LM_PATH_EN_MASK			0x08
+#define MHL_DST_LM_PATH_ENABLED			0x08
+#define MHL_DST_LM_PATH_DISABLED		0x00
+#define MHL_DST_LM_MUTED_MASK			0x10
+
+/* Extended Device Status Registers */
+enum {
+	MHL_XDS_CURR_ECBUS_MODE,
+	MHL_XDS_AVLINK_MODE_STATUS,
+	MHL_XDS_AVLINK_MODE_CONTROL,
+	MHL_XDS_MULTI_SINK_STATUS,
+	MHL_XDS_SIZE
+};
+
+/* Offset of XDEVSTAT registers */
+#define MHL_XDS_OFFSET				0x90
+#define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name)
+
+/* MHL_XDS_REG_CURR_ECBUS_MODE flags */
+#define MHL_XDS_SLOT_MODE_8BIT			0x00
+#define MHL_XDS_SLOT_MODE_6BIT			0x01
+#define MHL_XDS_ECBUS_S				0x04
+#define MHL_XDS_ECBUS_D				0x08
+
+#define MHL_XDS_LINK_CLOCK_75MHZ		0x00
+#define MHL_XDS_LINK_CLOCK_150MHZ		0x10
+#define MHL_XDS_LINK_CLOCK_300MHZ		0x20
+#define MHL_XDS_LINK_CLOCK_600MHZ		0x30
+
+#define MHL_XDS_LINK_STATUS_NO_SIGNAL		0x00
+#define MHL_XDS_LINK_STATUS_CRU_LOCKED		0x01
+#define MHL_XDS_LINK_STATUS_TMDS_NORMAL		0x02
+#define MHL_XDS_LINK_STATUS_TMDS_RESERVED	0x03
+
+#define MHL_XDS_LINK_RATE_1_5_GBPS		0x00
+#define MHL_XDS_LINK_RATE_3_0_GBPS		0x01
+#define MHL_XDS_LINK_RATE_6_0_GBPS		0x02
+#define MHL_XDS_ATT_CAPABLE			0x08
+
+#define MHL_XDS_SINK_STATUS_1_HPD_LOW		0x00
+#define MHL_XDS_SINK_STATUS_1_HPD_HIGH		0x01
+#define MHL_XDS_SINK_STATUS_2_HPD_LOW		0x00
+#define MHL_XDS_SINK_STATUS_2_HPD_HIGH		0x04
+#define MHL_XDS_SINK_STATUS_3_HPD_LOW		0x00
+#define MHL_XDS_SINK_STATUS_3_HPD_HIGH		0x10
+#define MHL_XDS_SINK_STATUS_4_HPD_LOW		0x00
+#define MHL_XDS_SINK_STATUS_4_HPD_HIGH		0x40
+
+/* Interrupt Registers */
+enum {
+	MHL_INT_RCHANGE,
+	MHL_INT_DCHANGE,
+	MHL_INT_SIZE
+};
+
+/* Offset of interrupt registers */
+#define MHL_INT_OFFSET				0x20
+#define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name)
+
+#define	MHL_INT_RC_DCAP_CHG			0x01
+#define MHL_INT_RC_DSCR_CHG			0x02
+#define MHL_INT_RC_REQ_WRT			0x04
+#define MHL_INT_RC_GRT_WRT			0x08
+#define MHL_INT_RC_3D_REQ			0x10
+#define MHL_INT_RC_FEAT_REQ			0x20
+#define MHL_INT_RC_FEAT_COMPLETE		0x40
+
+#define MHL_INT_DC_EDID_CHG			0x02
+
+enum {
+	MHL_ACK = 0x33, /* Command or Data byte acknowledge */
+	MHL_NACK = 0x34, /* Command or Data byte not acknowledge */
+	MHL_ABORT = 0x35, /* Transaction abort */
+	MHL_WRITE_STAT = 0xe0, /* Write one status register */
+	MHL_SET_INT = 0x60, /* Write one interrupt register */
+	MHL_READ_DEVCAP_REG = 0x61, /* Read one register */
+	MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */
+	MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */
+	MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */
+	MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */
+	MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */
+	MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */
+	MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */
+	MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */
+	MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */
+	MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */
+	MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */
+	MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */
+	MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */
+	MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */
+	/* let the rest of these float, they are software specific */
+	MHL_READ_EDID_BLOCK,
+	MHL_SEND_3D_REQ_OR_FEAT_REQ,
+	MHL_READ_DEVCAP,
+	MHL_READ_XDEVCAP
+};
+
+/* MSC message types */
+enum {
+	MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */
+	MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */
+	MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */
+	MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */
+	MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */
+	MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */
+	MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */
+	MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */
+	MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */
+	MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */
+	MHL_MSC_MSG_UCPE = 0x32, /* UCP Error sub-command */
+	MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */
+	MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */
+	MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */
+	MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */
+	MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */
+	MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */
+	MHL_MSC_MSG_BIST_TRIGGER = 0x60,
+	MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61,
+	MHL_MSC_MSG_BIST_READY = 0x62,
+	MHL_MSC_MSG_BIST_STOP = 0x63,
+};
+
+/* RAP action codes */
+#define MHL_RAP_POLL		0x00	/* Just do an ack */
+#define MHL_RAP_CONTENT_ON	0x10	/* Turn content stream ON */
+#define MHL_RAP_CONTENT_OFF	0x11	/* Turn content stream OFF */
+#define MHL_RAP_CBUS_MODE_DOWN	0x20
+#define MHL_RAP_CBUS_MODE_UP	0x21
+
+/* RAPK status codes */
+#define MHL_RAPK_NO_ERR		0x00	/* RAP action recognized & supported */
+#define MHL_RAPK_UNRECOGNIZED	0x01	/* Unknown RAP action code received */
+#define MHL_RAPK_UNSUPPORTED	0x02	/* Rcvd RAP action code not supported */
+#define MHL_RAPK_BUSY		0x03	/* Responder too busy to respond */
+
+/*
+ * Error status codes for RCPE messages
+ */
+/* No error. (Not allowed in RCPE messages) */
+#define MHL_RCPE_STATUS_NO_ERROR		0x00
+/* Unsupported/unrecognized key code */
+#define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE	0x01
+/* Responder busy. Initiator may retry message */
+#define MHL_RCPE_STATUS_BUSY			0x02
+
+/*
+ * Error status codes for RBPE messages
+ */
+/* No error. (Not allowed in RBPE messages) */
+#define MHL_RBPE_STATUS_NO_ERROR		0x00
+/* Unsupported/unrecognized button code */
+#define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE	0x01
+/* Responder busy. Initiator may retry message */
+#define MHL_RBPE_STATUS_BUSY			0x02
+
+/*
+ * Error status codes for UCPE messages
+ */
+/* No error. (Not allowed in UCPE messages) */
+#define MHL_UCPE_STATUS_NO_ERROR		0x00
+/* Unsupported/unrecognized key code */
+#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE	0x01
+
+#endif /* __MHL_H__ */
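
The *_REG() helpers in this header simply add a block offset to the enum index; an illustration (not from the patch):

/*
 * MHL_DST_REG(LINK_MODE) expands to MHL_DST_OFFSET + MHL_DST_LINK_MODE,
 * i.e. 0x30 + 1 = 0x31, the CBUS address of the link-mode device
 * status register.
 */
u8 link_mode_reg = MHL_DST_REG(LINK_MODE);	/* == 0x31 */
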
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 6726440..e336e39 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -57,7 +57,7 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
@@ -362,7 +362,7 @@ struct drm_ioctl_desc {
 struct drm_pending_event {
 	struct completion *completion;
 	struct drm_event *event;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct list_head link;
 	struct list_head pending_link;
 	struct drm_file *file_priv;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9701f2d..fc8af53 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -153,6 +153,7 @@ struct __drm_connnectors_state {
 
 /**
  * struct drm_atomic_state - the global state object for atomic updates
+ * @ref: count of all references to this state (will not be freed until zero)
  * @dev: parent DRM device
  * @allow_modeset: allow full modeset
  * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
@@ -164,6 +165,8 @@ struct __drm_connnectors_state {
  * @acquire_ctx: acquire context for this atomic modeset state update
  */
 struct drm_atomic_state {
+	struct kref ref;
+
 	struct drm_device *dev;
 	bool allow_modeset : 1;
 	bool legacy_cursor_update : 1;
@@ -193,7 +196,33 @@ static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
 struct drm_atomic_state * __must_check
 drm_atomic_state_alloc(struct drm_device *dev);
 void drm_atomic_state_clear(struct drm_atomic_state *state);
-void drm_atomic_state_free(struct drm_atomic_state *state);
+
+/**
+ * drm_atomic_state_get - acquire a reference to the atomic state
+ * @state: The atomic state
+ *
+ * Returns a new reference to the @state
+ */
+static inline struct drm_atomic_state *
+drm_atomic_state_get(struct drm_atomic_state *state)
+{
+	kref_get(&state->ref);
+	return state;
+}
+
+void __drm_atomic_state_free(struct kref *ref);
+
+/**
+ * drm_atomic_state_put - release a reference to the atomic state
+ * @state: The atomic state
+ *
+ * This releases a reference to @state which is freed after removing the
+ * final reference. No locking required and callable from any context.
+ */
+static inline void drm_atomic_state_put(struct drm_atomic_state *state)
+{
+	kref_put(&state->ref, __drm_atomic_state_free);
+}
 
 int  __must_check
 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
@@ -365,8 +394,17 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
  *
  * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
  * whether the state CRTC changed enough to need a full modeset cycle:
- * connectors_changed, mode_changed and active_change. This helper simply
+ * connectors_changed, mode_changed and active_changed. This helper simply
  * combines these three to compute the overall need for a modeset for @state.
+ *
+ * The atomic helper code sets these booleans, but drivers can and should
+ * change them appropriately to accurately represent whether a modeset is
+ * really needed. In general, drivers should avoid full modesets whenever
+ * possible.
+ *
+ * For example if the CRTC mode has changed, and the hardware is able to enact
+ * the requested mode change without going through a full modeset, the driver
+ * should clear mode_changed during its ->atomic_check.
  */
 static inline bool
 drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
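
The vc4 hunk earlier in this diff shows the intended usage of the new reference counting: take a reference before handing the state to asynchronous completion, and end every completion path in a put. Condensed, with hypothetical foo_* helpers:

/*
 * Sketch of the new contract. foo_queue_completion() and
 * foo_complete_commit() are assumed driver helpers whose final act is
 * drm_atomic_state_put(), replacing the removed drm_atomic_state_free().
 */
extern int foo_queue_completion(struct drm_device *dev,
				struct drm_atomic_state *state);
extern void foo_complete_commit(struct drm_device *dev,
				struct drm_atomic_state *state);

static int foo_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state, bool nonblock)
{
	drm_atomic_state_get(state);
	if (nonblock)
		return foo_queue_completion(dev, state);

	foo_complete_commit(dev, state);
	return 0;
}
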
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 36baa17..13221cf 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -47,8 +47,14 @@ struct drm_atomic_state;
 #define DRM_REFLECT_Y	BIT(5)
 #define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
 
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-						       unsigned int supported_rotations);
+static inline bool drm_rotation_90_or_270(unsigned int rotation)
+{
+	return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
+}
+
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+				       unsigned int rotation,
+				       unsigned int supported_rotations);
 unsigned int drm_rotation_simplify(unsigned int rotation,
 				   unsigned int supported_rotations);
 
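
With the rotation property moved from the device to the plane, drivers create it during plane init and can use the new orientation test wherever width and height swap; roughly, with invented foo_* names:

/*
 * Sketch: create a per-plane rotation property with the new helper
 * (replacing the device-global drm_mode_create_rotation_property())
 * and transpose the scanout size for 90/270 degree rotations.
 */
static int foo_plane_rotation_init(struct drm_plane *plane)
{
	return drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
						  DRM_ROTATE_0 |
						  DRM_ROTATE_180);
}

static void foo_get_scanout_size(const struct drm_plane_state *state,
				 unsigned int *w, unsigned int *h)
{
	*w = state->crtc_w;
	*h = state->crtc_h;
	if (drm_rotation_90_or_270(state->rotation))
		swap(*w, *h);
}
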
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 0aa2925..fa1aa21 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -47,13 +47,14 @@
 #include <drm/drm_plane.h>
 #include <drm/drm_blend.h>
 #include <drm/drm_color_mgmt.h>
+#include <drm/drm_debugfs_crc.h>
 
 struct drm_device;
 struct drm_mode_set;
 struct drm_file;
 struct drm_clip_rect;
 struct device_node;
-struct fence;
+struct dma_fence;
 struct edid;
 
 static inline int64_t U642I64(uint64_t val)
@@ -116,6 +117,11 @@ struct drm_plane_helper_funcs;
  * never return in a failure from the ->atomic_check callback. Userspace assumes
  * that a DPMS On will always succeed. In other words: @enable controls resource
  * assignment, @active controls the actual hardware state.
+ *
+ * The three booleans active_changed, connectors_changed and mode_changed are
+ * intended to indicate whether a full modeset is needed, rather than strictly
+ * describing what has changed in a commit.
+ * See also: drm_atomic_crtc_needs_modeset()
  */
 struct drm_crtc_state {
 	struct drm_crtc *crtc;
@@ -564,6 +570,30 @@ struct drm_crtc_funcs {
 	 * before data structures are torndown.
 	 */
 	void (*early_unregister)(struct drm_crtc *crtc);
+
+	/**
+	 * @set_crc_source:
+	 *
+	 * Changes the source of CRC checksums of frames at the request of
+	 * userspace, typically for testing purposes. The sources available are
+	 * specific of each driver and a %NULL value indicates that CRC
+	 * generation is to be switched off.
+	 *
+	 * When CRC generation is enabled, the driver should call
+	 * drm_crtc_add_crc_entry() at each frame, providing any information
+	 * that characterizes the frame contents in the crcN arguments, as
+	 * provided from the configured source. Drivers must accept an "auto"
+	 * source name that will select a default source for this CRTC.
+	 *
+	 * This callback is optional if the driver does not support any CRC
+	 * generation functionality.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*set_crc_source)(struct drm_crtc *crtc, const char *source,
+			      size_t *values_cnt);
 };
 
 /**
@@ -680,6 +710,22 @@ struct drm_crtc {
 	 * context.
 	 */
 	struct drm_modeset_acquire_ctx *acquire_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+	/**
+	 * @debugfs_entry:
+	 *
+	 * Debugfs directory for this CRTC.
+	 */
+	struct dentry *debugfs_entry;
+
+	/**
+	 * @crc:
+	 *
+	 * Configuration settings of CRC capture.
+	 */
+	struct drm_crtc_crc crc;
+#endif
 };
 
 /**
@@ -1110,11 +1156,6 @@ struct drm_mode_config {
 	 */
 	struct drm_property *plane_type_property;
 	/**
-	 * @rotation_property: Optional property for planes or CRTCs to specifiy
-	 * rotation.
-	 */
-	struct drm_property *rotation_property;
-	/**
 	 * @prop_src_x: Default atomic plane property for the plane source
 	 * position in the connected &drm_framebuffer.
 	 */
@@ -1354,7 +1395,7 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
  * Given a registered CRTC, return the mask bit of that CRTC for an
  * encoder's possible_crtcs field.
  */
-static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
+static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
 {
 	return 1 << drm_crtc_index(crtc);
 }
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
new file mode 100644
index 0000000..7d63b1d
--- /dev/null
+++ b/include/drm/drm_debugfs_crc.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2016 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_DEBUGFS_CRC_H__
+#define __DRM_DEBUGFS_CRC_H__
+
+#define DRM_MAX_CRC_NR		10
+
+/**
+ * struct drm_crtc_crc_entry - entry describing a frame's content
+ * @has_frame_counter: whether the source was able to provide a frame number
+ * @frame: number of the frame this CRC is about, if @has_frame_counter is true
+ * @crcs: array of values that characterize the frame
+ */
+struct drm_crtc_crc_entry {
+	bool has_frame_counter;
+	uint32_t frame;
+	uint32_t crcs[DRM_MAX_CRC_NR];
+};
+
+#define DRM_CRC_ENTRIES_NR	128
+
+/**
+ * struct drm_crtc_crc - data supporting CRC capture on a given CRTC
+ * @lock: protects the fields in this struct
+ * @source: name of the currently configured source of CRCs
+ * @opened: whether userspace has opened the data file for reading
+ * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
+ * @head: head of circular queue
+ * @tail: tail of circular queue
+ * @values_cnt: number of CRC values per entry, up to %DRM_MAX_CRC_NR
+ * @wq: workqueue used to synchronize reading and writing
+ */
+struct drm_crtc_crc {
+	spinlock_t lock;
+	const char *source;
+	bool opened;
+	struct drm_crtc_crc_entry *entries;
+	int head, tail;
+	size_t values_cnt;
+	wait_queue_head_t wq;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+			   uint32_t frame, uint32_t *crcs);
+#else
+static inline int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+					 uint32_t frame, uint32_t *crcs)
+{
+	return -EINVAL;
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* __DRM_DEBUGFS_CRC_H__ */
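
A driver wires this up by implementing the new &drm_crtc_funcs.set_crc_source hook and then reporting one entry per frame, typically from its vblank handler. A rough sketch with the hardware access stubbed out:

/*
 * Hypothetical per-frame CRC reporting against the new ABI.
 * foo_read_hw_crc() stands in for the hardware accessor, and
 * ->set_crc_source is assumed to have set values_cnt to 1.
 */
extern uint32_t foo_read_hw_crc(struct drm_crtc *crtc);	/* assumed */

static void foo_crtc_report_crc(struct drm_crtc *crtc, uint32_t frame)
{
	uint32_t crcs[DRM_MAX_CRC_NR] = {};

	crcs[0] = foo_read_hw_crc(crtc);
	drm_crtc_add_crc_entry(crtc, true, frame, crcs);
}
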
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
index e8a9dfd..4c42db8 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -40,6 +40,8 @@
 #define  DP_DUAL_MODE_REV_TYPE2 0x00
 #define  DP_DUAL_MODE_TYPE_MASK 0xf0
 #define  DP_DUAL_MODE_TYPE_TYPE2 0xa0
+/* This field is marked reserved in dual mode spec, used in LSPCON */
+#define  DP_DUAL_MODE_TYPE_HAS_DPCD 0x08
 #define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/
 #define  DP_DUAL_IEEE_OUI_LEN 3
 #define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
@@ -55,6 +57,11 @@
 #define  DP_DUAL_MODE_CEC_ENABLE 0x01
 #define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
 
+/* LSPCON specific registers, defined by MCA */
+#define DP_DUAL_MODE_LSPCON_MODE_CHANGE		0x40
+#define DP_DUAL_MODE_LSPCON_CURRENT_MODE		0x41
+#define  DP_DUAL_MODE_LSPCON_MODE_PCON			0x1
+
 struct i2c_adapter;
 
 ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
@@ -63,6 +70,20 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
 			       u8 offset, const void *buffer, size_t size);
 
 /**
+ * enum drm_lspcon_mode
+ * @DRM_LSPCON_MODE_INVALID: No LSPCON.
+ * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON
+ *	which drives DP++ to HDMI 1.4 conversion.
+ * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON
+ *	which drives DP++ to HDMI 2.0 active conversion.
+ */
+enum drm_lspcon_mode {
+	DRM_LSPCON_MODE_INVALID,
+	DRM_LSPCON_MODE_LS,
+	DRM_LSPCON_MODE_PCON,
+};
+
+/**
  * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
  * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
  * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
@@ -70,6 +91,7 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
  * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
  * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
  * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
+ * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter
  */
 enum drm_dp_dual_mode_type {
 	DRM_DP_DUAL_MODE_NONE,
@@ -78,6 +100,7 @@ enum drm_dp_dual_mode_type {
 	DRM_DP_DUAL_MODE_TYPE1_HDMI,
 	DRM_DP_DUAL_MODE_TYPE2_DVI,
 	DRM_DP_DUAL_MODE_TYPE2_HDMI,
+	DRM_DP_DUAL_MODE_LSPCON,
 };
 
 enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
@@ -89,4 +112,8 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
 				     struct i2c_adapter *adapter, bool enable);
 const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
 
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+			enum drm_lspcon_mode *current_mode);
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+			enum drm_lspcon_mode reqd_mode);
 #endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2a79882..55bbeb0 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -690,6 +690,12 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 		dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
 }
 
+static inline bool
+drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
+}
+
 /*
  * DisplayPort AUX channel
  */
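
drm_dp_is_branch() is just the canonical test for the DOWNSTREAMPORT_PRESENT bit, so callers can drop their open-coded checks; a usage sketch (foo_probe_downstream_ports() is an assumed helper):

/*
 * dpcd is the receiver capability block already fetched with
 * drm_dp_dpcd_read(); branch devices (hubs, adaptors) get their
 * downstream ports probed separately.
 */
extern void foo_probe_downstream_ports(struct drm_dp_aux *aux);	/* assumed */

static void foo_probe_sink(struct drm_dp_aux *aux,
			   const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	if (drm_dp_is_branch(dpcd))
		foo_probe_downstream_ports(aux);
}
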
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3a7d44..38eabf6 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -330,7 +330,6 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
 int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
 int drm_av_sync_delay(struct drm_connector *connector,
 		      const struct drm_display_mode *mode);
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
 
 #ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
 int drm_load_edid_firmware(struct drm_connector *connector);
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 387e33a..c7438ff 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -189,7 +189,7 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
 }
 
 /* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
-static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
+static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc);
 
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 30c30fa..dc0aafa 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,8 +25,29 @@
 #include <linux/types.h>
 #include <uapi/drm/drm_fourcc.h>
 
+/**
+ * struct drm_format_info - information about a DRM format
+ * @format: 4CC format identifier (DRM_FORMAT_*)
+ * @depth: Color depth (number of bits per pixel excluding padding bits),
+ *	valid for a subset of RGB formats only. This is a legacy field, do not
+ *	use in new code and set to 0 for new formats.
+ * @num_planes: Number of color planes (1 to 3)
+ * @cpp: Number of bytes per pixel (per plane)
+ * @hsub: Horizontal chroma subsampling factor
+ * @vsub: Vertical chroma subsampling factor
+ */
+struct drm_format_info {
+	u32 format;
+	u8 depth;
+	u8 num_planes;
+	u8 cpp[3];
+	u8 hsub;
+	u8 vsub;
+};
+
+const struct drm_format_info *__drm_format_info(u32 format);
+const struct drm_format_info *drm_format_info(u32 format);
 uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
 int drm_format_num_planes(uint32_t format);
 int drm_format_plane_cpp(uint32_t format, int plane);
 int drm_format_horz_chroma_subsampling(uint32_t format);
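
The vmwgfx hunk above is the template for converting the remaining drm_fb_get_bpp_depth() callers: look the format up once and take depth/bpp from the table, rejecting anything the legacy fields cannot describe. Condensed:

/*
 * Conversion sketch: drm_format_info() returns NULL for unknown
 * formats and info->depth is 0 where no legacy depth exists, so
 * legacy paths must reject both cases before using the table.
 */
static int foo_legacy_depth_bpp(u32 pixel_format, u32 *depth, u32 *bpp)
{
	const struct drm_format_info *info = drm_format_info(pixel_format);

	if (!info || !info->depth)
		return -EINVAL;

	*depth = info->depth;
	*bpp = info->cpp[0] * 8;
	return 0;
}
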
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 3fd87b3..26a6480 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -4,6 +4,7 @@
 #include <linux/of_graph.h>
 
 struct component_master_ops;
+struct component_match;
 struct device;
 struct drm_device;
 struct drm_encoder;
@@ -12,6 +13,10 @@ struct device_node;
 #ifdef CONFIG_OF
 extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
 					   struct device_node *port);
+extern void drm_of_component_match_add(struct device *master,
+				       struct component_match **matchptr,
+				       int (*compare)(struct device *, void *),
+				       struct device_node *node);
 extern int drm_of_component_probe(struct device *dev,
 				  int (*compare_of)(struct device *, void *),
 				  const struct component_master_ops *m_ops);
@@ -25,6 +30,14 @@ static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
 	return 0;
 }
 
+static inline void
+drm_of_component_match_add(struct device *master,
+			   struct component_match **matchptr,
+			   int (*compare)(struct device *, void *),
+			   struct device_node *node)
+{
+}
+
 static inline int
 drm_of_component_probe(struct device *dev,
 		       int (*compare_of)(struct device *, void *),
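
drm_of_component_match_add() is an OF-aware wrapper for building a component match
list; it takes its own reference on the device node, so the caller may drop its own
once the call returns. A sketch of a master driver gathering its components, where
example_master_ops is a placeholder for the driver's real bind/unbind ops:

    #include <linux/component.h>
    #include <linux/of_graph.h>
    #include <drm/drm_of.h>

    static const struct component_master_ops example_master_ops; /* placeholder */

    /* Sketch: match every device on the far end of our OF-graph endpoints. */
    static int example_add_components(struct device *master,
                                      int (*compare)(struct device *, void *))
    {
            struct component_match *match = NULL;
            struct device_node *ep, *remote;

            for_each_endpoint_of_node(master->of_node, ep) {
                    remote = of_graph_get_remote_port_parent(ep);
                    if (!remote)
                            continue;
                    drm_of_component_match_add(master, &match, compare, remote);
                    of_node_put(remote);
            }

            return component_master_add_with_match(master, &example_master_ops,
                                                   match);
    }
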
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 43cf193..c5e8a0d 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -59,7 +59,7 @@ struct drm_plane_state {
 
 	struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
 	struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
-	struct fence *fence;
+	struct dma_fence *fence;
 
 	/* Signed dest location allows it to be partially off screen */
 	int32_t crtc_x, crtc_y;
@@ -88,7 +88,6 @@ struct drm_plane_state {
 	struct drm_atomic_state *state;
 };
 
-
 /**
  * struct drm_plane_funcs - driver plane control functions
  */
@@ -386,6 +385,7 @@ enum drm_plane_type {
  * @type: type of plane (overlay, primary, cursor)
  * @state: current atomic state for this plane
  * @zpos_property: zpos property for this plane
+ * @rotation_property: rotation property for this plane
  * @helper_private: mid-layer private data
  */
 struct drm_plane {
@@ -432,6 +432,7 @@ struct drm_plane {
 	struct drm_plane_state *state;
 
 	struct drm_property *zpos_property;
+	struct drm_property *rotation_property;
 };
 
 #define obj_to_plane(x) container_of(x, struct drm_plane, base)
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index b46fa0e..545c6e0 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -64,7 +64,7 @@ struct i915_audio_component_ops {
 	 * Called from audio driver. After audio driver sets the
 	 * sample rate, it will call this function to set n/cts
 	 */
-	int (*sync_audio_rate)(struct device *, int port, int rate);
+	int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
 	/**
 	 * @get_eld: fill the audio state and ELD bytes for the given port
 	 *
@@ -77,7 +77,7 @@ struct i915_audio_component_ops {
 	 * Note that the returned size may be over @max_bytes.  Then it
 	 * implies that only a part of ELD has been copied to the buffer.
 	 */
-	int (*get_eld)(struct device *, int port, bool *enabled,
+	int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
 		       unsigned char *buf, int max_bytes);
 };
 
@@ -97,7 +97,7 @@ struct i915_audio_component_audio_ops {
 	 * status accordingly (even when the HDA controller is in power save
 	 * mode).
 	 */
-	void (*pin_eld_notify)(void *audio_ptr, int port);
+	void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
 };
 
 /**
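
The audio-component hooks now carry the pipe alongside the port, since with DP MST a
port alone no longer identifies a stream. A sketch of an HDA-side ops table using the
extended notify signature; the body is a placeholder for scheduling an ELD re-read:

    #include <linux/printk.h>
    #include <drm/i915_component.h>

    static void example_pin_eld_notify(void *audio_ptr, int port, int pipe)
    {
            /* Placeholder: a real driver re-reads the ELD for port/pipe here. */
            pr_debug("ELD notify: port %d, pipe %d\n", port, pipe);
    }

    static const struct i915_audio_component_audio_ops example_audio_ops = {
            .pin_eld_notify = example_pin_eld_notify,
    };
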
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 9eb940d..652e45b 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -47,6 +47,8 @@ struct drm_mm_node;
 
 struct ttm_placement;
 
+struct ttm_place;
+
 /**
  * struct ttm_bus_placement
  *
@@ -209,7 +211,7 @@ struct ttm_buffer_object {
 	 * Members protected by a bo reservation.
 	 */
 
-	struct fence *moving;
+	struct dma_fence *moving;
 
 	struct drm_vma_offset_node vma_node;
 
@@ -396,6 +398,17 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
 					    int resched);
 
 /**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+			      const struct ttm_place *place);
+
+/**
  * ttm_bo_synccpu_write_grab
  *
  * @bo: The buffer object:
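
ttm_bo_eviction_valuable() is the default behind the new eviction_valuable driver
hook added to struct ttm_bo_driver below. A rough sketch of an override in the style
of the range check amdgpu performs; field names assume the 4.9-era ttm_mem_reg and
ttm_place layouts:

    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_bo_driver.h>
    #include <drm/ttm/ttm_placement.h>

    /* Sketch: only call the BO worth evicting if it overlaps the page
     * range the new placement actually needs; otherwise keep the default.
     */
    static bool example_eviction_valuable(struct ttm_buffer_object *bo,
                                          const struct ttm_place *place)
    {
            if (place->fpfn >= bo->mem.start + bo->mem.num_pages ||
                (place->lpfn && place->lpfn <= bo->mem.start))
                    return false;

            return ttm_bo_eviction_valuable(bo, place);
    }

A driver opts in via the new .eviction_valuable member of its ttm_bo_driver; drivers
that want the stock behaviour can simply set it to ttm_bo_eviction_valuable.
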
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4f0a921..cdbdb40 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -303,7 +303,7 @@ struct ttm_mem_type_manager {
 	/*
 	 * Protected by @move_lock.
 	 */
-	struct fence *move;
+	struct dma_fence *move;
 };
 
 /**
@@ -371,9 +371,21 @@ struct ttm_bo_driver {
 	 * submission as a consequence.
 	 */
 
-	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
-	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
-			      struct ttm_mem_type_manager *man);
+	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
+	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
+			     struct ttm_mem_type_manager *man);
+
+	/**
+	 * struct ttm_bo_driver member eviction_valuable
+	 *
+	 * @bo: the buffer object to be evicted
+	 * @place: placement we need room for
+	 *
+	 * Check with the driver if it is valuable to evict a BO to make room
+	 * for a certain placement.
+	 */
+	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+				  const struct ttm_place *place);
 	/**
 	 * struct ttm_bo_driver member evict_flags:
 	 *
@@ -384,8 +396,9 @@ struct ttm_bo_driver {
 	 * finished, they'll end up in bo->mem.flags
 	 */
 
-	 void(*evict_flags) (struct ttm_buffer_object *bo,
-				struct ttm_placement *placement);
+	void (*evict_flags)(struct ttm_buffer_object *bo,
+			    struct ttm_placement *placement);
+
 	/**
 	 * struct ttm_bo_driver member move:
 	 *
@@ -399,10 +412,9 @@ struct ttm_bo_driver {
 	 *
 	 * Move a buffer between two memory regions.
 	 */
-	int (*move) (struct ttm_buffer_object *bo,
-		     bool evict, bool interruptible,
-		     bool no_wait_gpu,
-		     struct ttm_mem_reg *new_mem);
+	int (*move)(struct ttm_buffer_object *bo, bool evict,
+		    bool interruptible, bool no_wait_gpu,
+		    struct ttm_mem_reg *new_mem);
 
 	/**
 	 * struct ttm_bo_driver_member verify_access
@@ -416,8 +428,8 @@ struct ttm_bo_driver {
 	 * access for all buffer objects.
 	 * This function should return 0 if access is granted, -EPERM otherwise.
 	 */
-	int (*verify_access) (struct ttm_buffer_object *bo,
-			      struct file *filp);
+	int (*verify_access)(struct ttm_buffer_object *bo,
+			     struct file *filp);
 
 	/* hook to notify driver about a driver move so it
 	 * can do tiling things */
@@ -430,7 +442,7 @@ struct ttm_bo_driver {
 	/**
 	 * notify the driver that we're about to swap out this bo
 	 */
-	void (*swap_notify) (struct ttm_buffer_object *bo);
+	void (*swap_notify)(struct ttm_buffer_object *bo);
 
 	/**
 	 * Driver callback on when mapping io memory (for bo_move_memcpy
@@ -438,8 +450,10 @@ struct ttm_bo_driver {
 	 * the mapping is not use anymore. io_mem_reserve & io_mem_free
 	 * are balanced.
 	 */
-	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
-	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem);
+	void (*io_mem_free)(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem);
 
 	/**
 	 * Optional driver callback for when BO is removed from the LRU.
@@ -1025,7 +1039,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  */
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-				     struct fence *fence, bool evict,
+				     struct dma_fence *fence, bool evict,
 				     struct ttm_mem_reg *new_mem);
 
 /**
@@ -1040,7 +1054,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  * immediately or hang it on a temporary buffer object.
  */
 int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct fence *fence, bool evict,
+			 struct dma_fence *fence, bool evict,
 			 struct ttm_mem_reg *new_mem);
 
 /**
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b620c31..47f35b8 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -114,6 +114,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 					struct list_head *list,
-					struct fence *fence);
+					struct dma_fence *fence);
 
 #endif
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index e0b0741..8daeb3c 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -30,7 +30,7 @@
 #include <linux/list.h>
 #include <linux/dma-mapping.h>
 #include <linux/fs.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/wait.h>
 
 struct device;
@@ -143,7 +143,7 @@ struct dma_buf {
 	wait_queue_head_t poll;
 
 	struct dma_buf_poll_cb_t {
-		struct fence_cb cb;
+		struct dma_fence_cb cb;
 		wait_queue_head_t *poll;
 
 		unsigned long active;
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
new file mode 100644
index 0000000..5900945
--- /dev/null
+++ b/include/linux/dma-fence-array.h
@@ -0,0 +1,86 @@
+/*
+ * dma-fence-array: aggregates fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Gustavo Padovan <gustavo@padovan.org>
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_ARRAY_H
+#define __LINUX_DMA_FENCE_ARRAY_H
+
+#include <linux/dma-fence.h>
+
+/**
+ * struct dma_fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct dma_fence_array_cb {
+	struct dma_fence_cb cb;
+	struct dma_fence_array *array;
+};
+
+/**
+ * struct dma_fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct dma_fence_array {
+	struct dma_fence base;
+
+	spinlock_t lock;
+	unsigned num_fences;
+	atomic_t num_pending;
+	struct dma_fence **fences;
+};
+
+extern const struct dma_fence_ops dma_fence_array_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+	return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * to_dma_fence_array - cast a fence to a dma_fence_array
+ * @fence: fence to cast to a dma_fence_array
+ *
+ * Returns NULL if the fence is not a dma_fence_array,
+ * or the dma_fence_array otherwise.
+ */
+static inline struct dma_fence_array *
+to_dma_fence_array(struct dma_fence *fence)
+{
+	if (fence->ops != &dma_fence_array_ops)
+		return NULL;
+
+	return container_of(fence, struct dma_fence_array, base);
+}
+
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+					       struct dma_fence **fences,
+					       u64 context, unsigned seqno,
+					       bool signal_on_any);
+
+#endif /* __LINUX_DMA_FENCE_ARRAY_H */
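
dma_fence_array_create() takes ownership of both the fences[] allocation and the
fence references stored in it (they are dropped when the array fence is released);
on failure everything stays with the caller. A minimal sketch merging two fences
into one that signals once both have:

    #include <linux/slab.h>
    #include <linux/dma-fence.h>
    #include <linux/dma-fence-array.h>

    static struct dma_fence *example_merge_fences(struct dma_fence *a,
                                                  struct dma_fence *b)
    {
            struct dma_fence_array *array;
            struct dma_fence **fences;

            fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
            if (!fences)
                    return NULL;

            fences[0] = dma_fence_get(a);
            fences[1] = dma_fence_get(b);

            /* signal_on_any == false: the array signals when all do */
            array = dma_fence_array_create(2, fences,
                                           dma_fence_context_alloc(1), 1,
                                           false);
            if (!array) {
                    dma_fence_put(fences[0]);
                    dma_fence_put(fences[1]);
                    kfree(fences);
                    return NULL;
            }

            return &array->base;
    }
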
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
new file mode 100644
index 0000000..ba60c04
--- /dev/null
+++ b/include/linux/dma-fence.h
@@ -0,0 +1,437 @@
+/*
+ * Fence mechanism for dma-buf to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_H
+#define __LINUX_DMA_FENCE_H
+
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+
+struct dma_fence;
+struct dma_fence_ops;
+struct dma_fence_cb;
+
+/**
+ * struct dma_fence - software synchronization primitive
+ * @refcount: refcount for this fence
+ * @ops: dma_fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
+ * @cb_list: list of all callbacks to call
+ * @lock: spin_lock_irqsave used for locking
+ * @context: execution context this fence belongs to, returned by
+ *           dma_fence_context_alloc()
+ * @seqno: the sequence number of this fence inside the execution context,
+ * can be compared to decide which fence would be signaled later.
+ * @flags: A mask of DMA_FENCE_FLAG_* defined below
+ * @timestamp: Timestamp when the fence was signaled.
+ * @status: Optional, only valid if < 0, must be set before calling
+ * dma_fence_signal, indicates that the fence has completed with an error.
+ *
+ * the flags member must be manipulated and read using the appropriate
+ * atomic ops (bit_*), so taking the spinlock will not be needed most
+ * of the time.
+ *
+ * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
+ * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
+ * implementer of the fence for its own purposes. Can be used in different
+ * ways by different fence implementers, so do not rely on this.
+ *
+ * Since atomic bitops are used, this is not guaranteed to be the case.
+ * Particularly, if the bit was set, but dma_fence_signal was called right
+ * before this bit was set, it would have been able to set the
+ * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
+ * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
+ * after dma_fence_signal was called, any enable_signaling call will have either
+ * been completed, or never called at all.
+ */
+struct dma_fence {
+	struct kref refcount;
+	const struct dma_fence_ops *ops;
+	struct rcu_head rcu;
+	struct list_head cb_list;
+	spinlock_t *lock;
+	u64 context;
+	unsigned seqno;
+	unsigned long flags;
+	ktime_t timestamp;
+	int status;
+};
+
+enum dma_fence_flag_bits {
+	DMA_FENCE_FLAG_SIGNALED_BIT,
+	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+	DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
+};
+
+typedef void (*dma_fence_func_t)(struct dma_fence *fence,
+				 struct dma_fence_cb *cb);
+
+/**
+ * struct dma_fence_cb - callback for dma_fence_add_callback
+ * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * @func: dma_fence_func_t to call
+ *
+ * This struct will be initialized by dma_fence_add_callback, additional
+ * data can be passed along by embedding dma_fence_cb in another struct.
+ */
+struct dma_fence_cb {
+	struct list_head node;
+	dma_fence_func_t func;
+};
+
+/**
+ * struct dma_fence_ops - operations implemented for fence
+ * @get_driver_name: returns the driver name.
+ * @get_timeline_name: return the name of the context this fence belongs to.
+ * @enable_signaling: enable software signaling of fence.
+ * @signaled: [optional] peek whether the fence is signaled, can be null.
+ * @wait: custom wait implementation, or dma_fence_default_wait.
+ * @release: [optional] called on destruction of fence, can be null
+ * @fill_driver_data: [optional] callback to fill in free-form debug info
+ * Returns amount of bytes filled, or -errno.
+ * @fence_value_str: [optional] fills in the value of the fence as a string
+ * @timeline_value_str: [optional] fills in the current value of the timeline
+ * as a string
+ *
+ * Notes on enable_signaling:
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * irqs, or insert commands into cmdstream, etc.  This is called
+ * in the first wait() or add_callback() path to let the fence
+ * implementation know that there is another driver waiting on
+ * the signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * fence->status may be set in enable_signaling, but only when false is
+ * returned.
+ *
+ * Calling dma_fence_signal before enable_signaling is called allows
+ * for a tiny race window in which enable_signaling is called during,
+ * before, or after dma_fence_signal. To fight this, it is recommended
+ * that before enable_signaling returns true an extra reference is
+ * taken on the fence, to be released when the fence is signaled.
+ * This will mean dma_fence_signal will still be called twice, but
+ * the second time will be a noop since it was already signaled.
+ *
+ * Notes on signaled:
+ * May set fence->status if returning true.
+ *
+ * Notes on wait:
+ * Must not be NULL; set to dma_fence_default_wait for the default
+ * implementation. The dma_fence_default_wait implementation should work for
+ * any fence, as long as enable_signaling works correctly.
+ *
+ * Must return -ERESTARTSYS if called with intr == true and the wait was
+ * interrupted, the remaining jiffies if the fence has signaled, or 0 if the
+ * wait timed out. Can also return other error values on custom implementations,
+ * which should be treated as if the fence is signaled. For example a hardware
+ * lockup could be reported like that.
+ *
+ * Notes on release:
+ * Can be NULL, this function allows additional commands to run on
+ * destruction of the fence. Can be called from irq context.
+ * If pointer is set to NULL, kfree will get called instead.
+ */
+
+struct dma_fence_ops {
+	const char * (*get_driver_name)(struct dma_fence *fence);
+	const char * (*get_timeline_name)(struct dma_fence *fence);
+	bool (*enable_signaling)(struct dma_fence *fence);
+	bool (*signaled)(struct dma_fence *fence);
+	signed long (*wait)(struct dma_fence *fence,
+			    bool intr, signed long timeout);
+	void (*release)(struct dma_fence *fence);
+
+	int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+	void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
+	void (*timeline_value_str)(struct dma_fence *fence,
+				   char *str, int size);
+};
+
+void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+		    spinlock_t *lock, u64 context, unsigned seqno);
+
+void dma_fence_release(struct kref *kref);
+void dma_fence_free(struct dma_fence *fence);
+
+/**
+ * dma_fence_put - decreases refcount of the fence
+ * @fence:	[in]	fence to reduce refcount of
+ */
+static inline void dma_fence_put(struct dma_fence *fence)
+{
+	if (fence)
+		kref_put(&fence->refcount, dma_fence_release);
+}
+
+/**
+ * dma_fence_get - increases refcount of the fence
+ * @fence:	[in]	fence to increase refcount of
+ *
+ * Returns the same fence, with refcount increased by 1.
+ */
+static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
+{
+	if (fence)
+		kref_get(&fence->refcount);
+	return fence;
+}
+
+/**
+ * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ *                     rcu read lock
+ * @fence:	[in]	fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ */
+static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
+{
+	if (kref_get_unless_zero(&fence->refcount))
+		return fence;
+	else
+		return NULL;
+}
+
+/**
+ * dma_fence_get_rcu_safe  - acquire a reference to an RCU tracked fence
+ * @fence:	[in]	pointer to fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ * This function handles acquiring a reference to a fence that may be
+ * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * so long as the caller is using RCU on the pointer to the fence.
+ *
+ * An alternative mechanism is to employ a seqlock to protect a bunch of
+ * fences, such as used by struct reservation_object. When using a seqlock,
+ * the seqlock must be taken before and checked after a reference to the
+ * fence is acquired (as shown here).
+ *
+ * The caller is required to hold the RCU read lock.
+ */
+static inline struct dma_fence *
+dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+{
+	do {
+		struct dma_fence *fence;
+
+		fence = rcu_dereference(*fencep);
+		if (!fence || !dma_fence_get_rcu(fence))
+			return NULL;
+
+		/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
+		 * provides a full memory barrier upon success (such as now).
+		 * This is paired with the write barrier from assigning
+		 * to the __rcu protected fence pointer so that if that
+		 * pointer still matches the current fence, we know we
+		 * have successfully acquired a reference to it. If it no
+		 * longer matches, we are holding a reference to some other
+		 * reallocated pointer. This is possible if the allocator
+		 * is using a freelist like SLAB_DESTROY_BY_RCU where the
+		 * fence remains valid for the RCU grace period, but it
+		 * may be reallocated. When using such allocators, we are
+		 * responsible for ensuring the reference we get is to
+		 * the right fence, as below.
+		 */
+		if (fence == rcu_access_pointer(*fencep))
+			return rcu_pointer_handoff(fence);
+
+		dma_fence_put(fence);
+	} while (1);
+}
+
+int dma_fence_signal(struct dma_fence *fence);
+int dma_fence_signal_locked(struct dma_fence *fence);
+signed long dma_fence_default_wait(struct dma_fence *fence,
+				   bool intr, signed long timeout);
+int dma_fence_add_callback(struct dma_fence *fence,
+			   struct dma_fence_cb *cb,
+			   dma_fence_func_t func);
+bool dma_fence_remove_callback(struct dma_fence *fence,
+			       struct dma_fence_cb *cb);
+void dma_fence_enable_sw_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_is_signaled_locked - Return an indication if the fence
+ *                                is signaled yet.
+ * @fence:	[in]	the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * This function requires fence->lock to be held.
+ */
+static inline bool
+dma_fence_is_signaled_locked(struct dma_fence *fence)
+{
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return true;
+
+	if (fence->ops->signaled && fence->ops->signaled(fence)) {
+		dma_fence_signal_locked(fence);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
+ * @fence:	[in]	the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * It's recommended for seqno fences to call dma_fence_signal when the
+ * operation is complete, it makes it possible to prevent issues from
+ * wraparound between time of issue and time of use by checking the return
+ * value of this function before calling hardware-specific wait instructions.
+ */
+static inline bool
+dma_fence_is_signaled(struct dma_fence *fence)
+{
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return true;
+
+	if (fence->ops->signaled && fence->ops->signaled(fence)) {
+		dma_fence_signal(fence);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * dma_fence_is_later - return if f1 is chronologically later than f2
+ * @f1:	[in]	the first fence from the same context
+ * @f2:	[in]	the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not re-used across contexts.
+ */
+static inline bool dma_fence_is_later(struct dma_fence *f1,
+				      struct dma_fence *f2)
+{
+	if (WARN_ON(f1->context != f2->context))
+		return false;
+
+	return (int)(f1->seqno - f2->seqno) > 0;
+}
+
+/**
+ * dma_fence_later - return the chronologically later fence
+ * @f1:	[in]	the first fence from the same context
+ * @f2:	[in]	the second fence from the same context
+ *
+ * Returns NULL if both fences are signaled, otherwise the fence that would be
+ * signaled last. Both fences must be from the same context, since a seqno is
+ * not re-used across contexts.
+ */
+static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
+						struct dma_fence *f2)
+{
+	if (WARN_ON(f1->context != f2->context))
+		return NULL;
+
+	/*
+	 * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
+	 * have been set if enable_signaling wasn't called, and enabling that
+	 * here is overkill.
+	 */
+	if (dma_fence_is_later(f1, f2))
+		return dma_fence_is_signaled(f1) ? NULL : f1;
+	else
+		return dma_fence_is_signaled(f2) ? NULL : f2;
+}
+
+signed long dma_fence_wait_timeout(struct dma_fence *,
+				   bool intr, signed long timeout);
+signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
+				       uint32_t count,
+				       bool intr, signed long timeout);
+
+/**
+ * dma_fence_wait - sleep until the fence gets signaled
+ * @fence:	[in]	the fence to wait on
+ * @intr:	[in]	if true, do an interruptible wait
+ *
+ * This function will return -ERESTARTSYS if interrupted by a signal,
+ * or 0 if the fence was signaled. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly holds a reference to the fence, otherwise the
+ * fence might be freed before return, resulting in undefined behavior.
+ */
+static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
+{
+	signed long ret;
+
+	/* Since dma_fence_wait_timeout cannot timeout with
+	 * MAX_SCHEDULE_TIMEOUT, only valid return values are
+	 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
+	 */
+	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+
+	return ret < 0 ? ret : 0;
+}
+
+u64 dma_fence_context_alloc(unsigned num);
+
+#define DMA_FENCE_TRACE(f, fmt, args...) \
+	do {								\
+		struct dma_fence *__ff = (f);				\
+		if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE))			\
+			pr_info("f %llu#%u: " fmt,			\
+				__ff->context, __ff->seqno, ##args);	\
+	} while (0)
+
+#define DMA_FENCE_WARN(f, fmt, args...) \
+	do {								\
+		struct dma_fence *__ff = (f);				\
+		pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno,	\
+			 ##args);					\
+	} while (0)
+
+#define DMA_FENCE_ERR(f, fmt, args...) \
+	do {								\
+		struct dma_fence *__ff = (f);				\
+		pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno,	\
+			##args);					\
+	} while (0)
+
+#endif /* __LINUX_DMA_FENCE_H */
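
Besides the blocking dma_fence_wait(), the callback interface lets code react to
signaling asynchronously by embedding a struct dma_fence_cb. A sketch that rebuilds
a blocking wait on top of it, mainly to show the convention that a non-zero return
from dma_fence_add_callback() (-ENOENT) means the fence had already signaled:

    #include <linux/completion.h>
    #include <linux/dma-fence.h>

    struct example_waiter {
            struct dma_fence_cb cb;
            struct completion done;
    };

    static void example_fence_cb(struct dma_fence *fence,
                                 struct dma_fence_cb *cb)
    {
            struct example_waiter *w =
                    container_of(cb, struct example_waiter, cb);

            complete(&w->done);
    }

    static void example_wait(struct dma_fence *fence)
    {
            struct example_waiter w;

            init_completion(&w.done);

            if (dma_fence_add_callback(fence, &w.cb, example_fence_cb))
                    return; /* already signaled */

            wait_for_completion(&w.done);
    }
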
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
deleted file mode 100644
index a44794e..0000000
--- a/include/linux/fence-array.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * fence-array: aggregates fence to be waited together
- *
- * Copyright (C) 2016 Collabora Ltd
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
- * Authors:
- *	Gustavo Padovan <gustavo@padovan.org>
- *	Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_ARRAY_H
-#define __LINUX_FENCE_ARRAY_H
-
-#include <linux/fence.h>
-
-/**
- * struct fence_array_cb - callback helper for fence array
- * @cb: fence callback structure for signaling
- * @array: reference to the parent fence array object
- */
-struct fence_array_cb {
-	struct fence_cb cb;
-	struct fence_array *array;
-};
-
-/**
- * struct fence_array - fence to represent an array of fences
- * @base: fence base class
- * @lock: spinlock for fence handling
- * @num_fences: number of fences in the array
- * @num_pending: fences in the array still pending
- * @fences: array of the fences
- */
-struct fence_array {
-	struct fence base;
-
-	spinlock_t lock;
-	unsigned num_fences;
-	atomic_t num_pending;
-	struct fence **fences;
-};
-
-extern const struct fence_ops fence_array_ops;
-
-/**
- * fence_is_array - check if a fence is from the array subsclass
- *
- * Return true if it is a fence_array and false otherwise.
- */
-static inline bool fence_is_array(struct fence *fence)
-{
-	return fence->ops == &fence_array_ops;
-}
-
-/**
- * to_fence_array - cast a fence to a fence_array
- * @fence: fence to cast to a fence_array
- *
- * Returns NULL if the fence is not a fence_array,
- * or the fence_array otherwise.
- */
-static inline struct fence_array *to_fence_array(struct fence *fence)
-{
-	if (fence->ops != &fence_array_ops)
-		return NULL;
-
-	return container_of(fence, struct fence_array, base);
-}
-
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
-				       u64 context, unsigned seqno,
-				       bool signal_on_any);
-
-#endif /* __LINUX_FENCE_ARRAY_H */
diff --git a/include/linux/fence.h b/include/linux/fence.h
deleted file mode 100644
index 0d76305..0000000
--- a/include/linux/fence.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Fence mechanism for dma-buf to allow for asynchronous dma access
- *
- * Copyright (C) 2012 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_H
-#define __LINUX_FENCE_H
-
-#include <linux/err.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/printk.h>
-#include <linux/rcupdate.h>
-
-struct fence;
-struct fence_ops;
-struct fence_cb;
-
-/**
- * struct fence - software synchronization primitive
- * @refcount: refcount for this fence
- * @ops: fence_ops associated with this fence
- * @rcu: used for releasing fence with kfree_rcu
- * @cb_list: list of all callbacks to call
- * @lock: spin_lock_irqsave used for locking
- * @context: execution context this fence belongs to, returned by
- *           fence_context_alloc()
- * @seqno: the sequence number of this fence inside the execution context,
- * can be compared to decide which fence would be signaled later.
- * @flags: A mask of FENCE_FLAG_* defined below
- * @timestamp: Timestamp when the fence was signaled.
- * @status: Optional, only valid if < 0, must be set before calling
- * fence_signal, indicates that the fence has completed with an error.
- *
- * the flags member must be manipulated and read using the appropriate
- * atomic ops (bit_*), so taking the spinlock will not be needed most
- * of the time.
- *
- * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
- * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
- * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
- * implementer of the fence for its own purposes. Can be used in different
- * ways by different fence implementers, so do not rely on this.
- *
- * Since atomic bitops are used, this is not guaranteed to be the case.
- * Particularly, if the bit was set, but fence_signal was called right
- * before this bit was set, it would have been able to set the
- * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
- * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
- * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
- * after fence_signal was called, any enable_signaling call will have either
- * been completed, or never called at all.
- */
-struct fence {
-	struct kref refcount;
-	const struct fence_ops *ops;
-	struct rcu_head rcu;
-	struct list_head cb_list;
-	spinlock_t *lock;
-	u64 context;
-	unsigned seqno;
-	unsigned long flags;
-	ktime_t timestamp;
-	int status;
-};
-
-enum fence_flag_bits {
-	FENCE_FLAG_SIGNALED_BIT,
-	FENCE_FLAG_ENABLE_SIGNAL_BIT,
-	FENCE_FLAG_USER_BITS, /* must always be last member */
-};
-
-typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
-
-/**
- * struct fence_cb - callback for fence_add_callback
- * @node: used by fence_add_callback to append this struct to fence::cb_list
- * @func: fence_func_t to call
- *
- * This struct will be initialized by fence_add_callback, additional
- * data can be passed along by embedding fence_cb in another struct.
- */
-struct fence_cb {
-	struct list_head node;
-	fence_func_t func;
-};
-
-/**
- * struct fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
- *
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc.  This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
- *
- * This function can be called called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->status may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->status if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to fence_default_wait for default implementation.
- * the fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
- */
-
-struct fence_ops {
-	const char * (*get_driver_name)(struct fence *fence);
-	const char * (*get_timeline_name)(struct fence *fence);
-	bool (*enable_signaling)(struct fence *fence);
-	bool (*signaled)(struct fence *fence);
-	signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
-	void (*release)(struct fence *fence);
-
-	int (*fill_driver_data)(struct fence *fence, void *data, int size);
-	void (*fence_value_str)(struct fence *fence, char *str, int size);
-	void (*timeline_value_str)(struct fence *fence, char *str, int size);
-};
-
-void fence_init(struct fence *fence, const struct fence_ops *ops,
-		spinlock_t *lock, u64 context, unsigned seqno);
-
-void fence_release(struct kref *kref);
-void fence_free(struct fence *fence);
-
-/**
- * fence_get - increases refcount of the fence
- * @fence:	[in]	fence to increase refcount of
- *
- * Returns the same fence, with refcount increased by 1.
- */
-static inline struct fence *fence_get(struct fence *fence)
-{
-	if (fence)
-		kref_get(&fence->refcount);
-	return fence;
-}
-
-/**
- * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
- * @fence:	[in]	fence to increase refcount of
- *
- * Function returns NULL if no refcount could be obtained, or the fence.
- */
-static inline struct fence *fence_get_rcu(struct fence *fence)
-{
-	if (kref_get_unless_zero(&fence->refcount))
-		return fence;
-	else
-		return NULL;
-}
-
-/**
- * fence_put - decreases refcount of the fence
- * @fence:	[in]	fence to reduce refcount of
- */
-static inline void fence_put(struct fence *fence)
-{
-	if (fence)
-		kref_put(&fence->refcount, fence_release);
-}
-
-int fence_signal(struct fence *fence);
-int fence_signal_locked(struct fence *fence);
-signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
-		       fence_func_t func);
-bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
-void fence_enable_sw_signaling(struct fence *fence);
-
-/**
- * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
- * @fence:	[in]	the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * This function requires fence->lock to be held.
- */
-static inline bool
-fence_is_signaled_locked(struct fence *fence)
-{
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		return true;
-
-	if (fence->ops->signaled && fence->ops->signaled(fence)) {
-		fence_signal_locked(fence);
-		return true;
-	}
-
-	return false;
-}
-
-/**
- * fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence:	[in]	the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * It's recommended for seqno fences to call fence_signal when the
- * operation is complete, it makes it possible to prevent issues from
- * wraparound between time of issue and time of use by checking the return
- * value of this function before calling hardware-specific wait instructions.
- */
-static inline bool
-fence_is_signaled(struct fence *fence)
-{
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		return true;
-
-	if (fence->ops->signaled && fence->ops->signaled(fence)) {
-		fence_signal(fence);
-		return true;
-	}
-
-	return false;
-}
-
-/**
- * fence_is_later - return if f1 is chronologically later than f2
- * @f1:	[in]	the first fence from the same context
- * @f2:	[in]	the second fence from the same context
- *
- * Returns true if f1 is chronologically later than f2. Both fences must be
- * from the same context, since a seqno is not re-used across contexts.
- */
-static inline bool fence_is_later(struct fence *f1, struct fence *f2)
-{
-	if (WARN_ON(f1->context != f2->context))
-		return false;
-
-	return (int)(f1->seqno - f2->seqno) > 0;
-}
-
-/**
- * fence_later - return the chronologically later fence
- * @f1:	[in]	the first fence from the same context
- * @f2:	[in]	the second fence from the same context
- *
- * Returns NULL if both fences are signaled, otherwise the fence that would be
- * signaled last. Both fences must be from the same context, since a seqno is
- * not re-used across contexts.
- */
-static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
-{
-	if (WARN_ON(f1->context != f2->context))
-		return NULL;
-
-	/*
-	 * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
-	 * set if enable_signaling wasn't called, and enabling that here is
-	 * overkill.
-	 */
-	if (fence_is_later(f1, f2))
-		return fence_is_signaled(f1) ? NULL : f1;
-	else
-		return fence_is_signaled(f2) ? NULL : f2;
-}
-
-signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
-				   bool intr, signed long timeout);
-
-/**
- * fence_wait - sleep until the fence gets signaled
- * @fence:	[in]	the fence to wait on
- * @intr:	[in]	if true, do an interruptible wait
- *
- * This function will return -ERESTARTSYS if interrupted by a signal,
- * or 0 if the fence was signaled. Other error values may be
- * returned on custom implementations.
- *
- * Performs a synchronous wait on this fence. It is assumed the caller
- * directly or indirectly holds a reference to the fence, otherwise the
- * fence might be freed before return, resulting in undefined behavior.
- */
-static inline signed long fence_wait(struct fence *fence, bool intr)
-{
-	signed long ret;
-
-	/* Since fence_wait_timeout cannot timeout with
-	 * MAX_SCHEDULE_TIMEOUT, only valid return values are
-	 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
-	 */
-	ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-
-	return ret < 0 ? ret : 0;
-}
-
-u64 fence_context_alloc(unsigned num);
-
-#define FENCE_TRACE(f, fmt, args...) \
-	do {								\
-		struct fence *__ff = (f);				\
-		if (IS_ENABLED(CONFIG_FENCE_TRACE))			\
-			pr_info("f %llu#%u: " fmt,			\
-				__ff->context, __ff->seqno, ##args);	\
-	} while (0)
-
-#define FENCE_WARN(f, fmt, args...) \
-	do {								\
-		struct fence *__ff = (f);				\
-		pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno,	\
-			 ##args);					\
-	} while (0)
-
-#define FENCE_ERR(f, fmt, args...) \
-	do {								\
-		struct fence *__ff = (f);				\
-		pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno,	\
-			##args);					\
-	} while (0)
-
-#endif /* __LINUX_FENCE_H */
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index e974420..edbb4fc 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -78,6 +78,8 @@ enum hdmi_picture_aspect {
 	HDMI_PICTURE_ASPECT_NONE,
 	HDMI_PICTURE_ASPECT_4_3,
 	HDMI_PICTURE_ASPECT_16_9,
+	HDMI_PICTURE_ASPECT_64_27,
+	HDMI_PICTURE_ASPECT_256_135,
 	HDMI_PICTURE_ASPECT_RESERVED,
 };
 
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b0f305e..2e313cc 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -40,7 +40,7 @@
 #define _LINUX_RESERVATION_H
 
 #include <linux/ww_mutex.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/slab.h>
 #include <linux/seqlock.h>
 #include <linux/rcupdate.h>
@@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[];
 struct reservation_object_list {
 	struct rcu_head rcu;
 	u32 shared_count, shared_max;
-	struct fence __rcu *shared[];
+	struct dma_fence __rcu *shared[];
 };
 
 /**
@@ -74,7 +74,7 @@ struct reservation_object {
 	struct ww_mutex lock;
 	seqcount_t seq;
 
-	struct fence __rcu *fence_excl;
+	struct dma_fence __rcu *fence_excl;
 	struct reservation_object_list __rcu *fence;
 	struct reservation_object_list *staged;
 };
@@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj)
 {
 	int i;
 	struct reservation_object_list *fobj;
-	struct fence *excl;
+	struct dma_fence *excl;
 
 	/*
 	 * This object should be dead and all references must have
@@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj)
 	 */
 	excl = rcu_dereference_protected(obj->fence_excl, 1);
 	if (excl)
-		fence_put(excl);
+		dma_fence_put(excl);
 
 	fobj = rcu_dereference_protected(obj->fence, 1);
 	if (fobj) {
 		for (i = 0; i < fobj->shared_count; ++i)
-			fence_put(rcu_dereference_protected(fobj->shared[i], 1));
+			dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
 
 		kfree(fobj);
 	}
@@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj)
  * RETURNS
  * The exclusive fence or NULL
  */
-static inline struct fence *
+static inline struct dma_fence *
 reservation_object_get_excl(struct reservation_object *obj)
 {
 	return rcu_dereference_protected(obj->fence_excl,
@@ -173,10 +173,10 @@ reservation_object_get_excl(struct reservation_object *obj)
  * RETURNS
  * The exclusive fence or NULL if none
  */
-static inline struct fence *
+static inline struct dma_fence *
 reservation_object_get_excl_rcu(struct reservation_object *obj)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned seq;
 retry:
 	seq = read_seqcount_begin(&obj->seq);
@@ -186,22 +186,22 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
 		rcu_read_unlock();
 		goto retry;
 	}
-	fence = fence_get(fence);
+	fence = dma_fence_get(fence);
 	rcu_read_unlock();
 	return fence;
 }
 
 int reservation_object_reserve_shared(struct reservation_object *obj);
 void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct fence *fence);
+					 struct dma_fence *fence);
 
 void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct fence *fence);
+				       struct dma_fence *fence);
 
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct fence **pfence_excl,
+				      struct dma_fence **pfence_excl,
 				      unsigned *pshared_count,
-				      struct fence ***pshared);
+				      struct dma_fence ***pshared);
 
 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 bool wait_all, bool intr,
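
Apart from the type rename the reservation-object API is unchanged; the usual
pattern of waiting out other users before publishing a new exclusive fence now
reads as below (a sketch; the caller is assumed to hold obj->lock for the add):

    #include <linux/sched.h>
    #include <linux/reservation.h>

    /* Sketch: wait for all current fences, then attach a write fence. */
    static int example_publish_write_fence(struct reservation_object *obj,
                                           struct dma_fence *fence)
    {
            long ret;

            ret = reservation_object_wait_timeout_rcu(obj,
                                                      true /* wait_all */,
                                                      true /* intr */,
                                                      MAX_SCHEDULE_TIMEOUT);
            if (ret < 0)
                    return ret;

            reservation_object_add_excl_fence(obj, fence);
            return 0;
    }
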
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
index a1ba6a5..c58c535 100644
--- a/include/linux/seqno-fence.h
+++ b/include/linux/seqno-fence.h
@@ -20,7 +20,7 @@
 #ifndef __LINUX_SEQNO_FENCE_H
 #define __LINUX_SEQNO_FENCE_H
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/dma-buf.h>
 
 enum seqno_fence_condition {
@@ -29,15 +29,15 @@ enum seqno_fence_condition {
 };
 
 struct seqno_fence {
-	struct fence base;
+	struct dma_fence base;
 
-	const struct fence_ops *ops;
+	const struct dma_fence_ops *ops;
 	struct dma_buf *sync_buf;
 	uint32_t seqno_ofs;
 	enum seqno_fence_condition condition;
 };
 
-extern const struct fence_ops seqno_fence_ops;
+extern const struct dma_fence_ops seqno_fence_ops;
 
 /**
  * to_seqno_fence - cast a fence to a seqno_fence
@@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops;
  * or the seqno_fence otherwise.
  */
 static inline struct seqno_fence *
-to_seqno_fence(struct fence *fence)
+to_seqno_fence(struct dma_fence *fence)
 {
 	if (fence->ops != &seqno_fence_ops)
 		return NULL;
@@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence)
  * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
  * device's vm can be expensive.
  *
- * It is recommended for creators of seqno_fence to call fence_signal
+ * It is recommended for creators of seqno_fence to call dma_fence_signal()
  * before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check fence_is_signaled
+ * time of issue vs time of check, since users can check dma_fence_is_signaled()
  * before submitting instructions for the hardware to wait on the fence.
  * However, when ops.enable_signaling is not called, it doesn't have to be
  * done as soon as possible, just before there's any real danger of seqno
@@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
 		 struct dma_buf *sync_buf,  uint32_t context,
 		 uint32_t seqno_ofs, uint32_t seqno,
 		 enum seqno_fence_condition cond,
-		 const struct fence_ops *ops)
+		 const struct dma_fence_ops *ops)
 {
 	BUG_ON(!fence || !sync_buf || !ops);
 	BUG_ON(!ops->wait || !ops->enable_signaling ||
 	       !ops->get_driver_name || !ops->get_timeline_name);
 
 	/*
-	 * ops is used in fence_init for get_driver_name, so needs to be
+	 * ops is used in dma_fence_init for get_driver_name, so needs to be
 	 * initialized first
 	 */
 	fence->ops = ops;
-	fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
+	dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
 	get_dma_buf(sync_buf);
 	fence->sync_buf = sync_buf;
 	fence->seqno_ofs = seqno_ofs;
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index aa17ccf..3e3ab84 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -18,8 +18,8 @@
 #include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#include <linux/fence.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * struct sync_file - sync file to export to the userspace
@@ -41,13 +41,13 @@ struct sync_file {
 
 	wait_queue_head_t	wq;
 
-	struct fence		*fence;
-	struct fence_cb cb;
+	struct dma_fence	*fence;
+	struct dma_fence_cb cb;
 };
 
-#define POLL_ENABLED FENCE_FLAG_USER_BITS
+#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
 
-struct sync_file *sync_file_create(struct fence *fence);
-struct fence *sync_file_get_fence(int fd);
+struct sync_file *sync_file_create(struct dma_fence *fence);
+struct dma_fence *sync_file_get_fence(int fd);
 
 #endif /* _LINUX_SYNC_H */
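
Exporting a fence to userspace keeps its shape across the rename. A sketch of the
usual fd export dance; sync_file_create() takes its own reference on the fence, and
the backing struct file is assumed to live in sync_file->file as before:

    #include <linux/file.h>
    #include <linux/fcntl.h>
    #include <linux/dma-fence.h>
    #include <linux/sync_file.h>

    /* Sketch: hand a dma_fence to userspace as a sync_file fd. */
    static int example_fence_to_fd(struct dma_fence *fence)
    {
            struct sync_file *sync_file;
            int fd;

            fd = get_unused_fd_flags(O_CLOEXEC);
            if (fd < 0)
                    return fd;

            sync_file = sync_file_create(fence);
            if (!sync_file) {
                    put_unused_fd(fd);
                    return -ENOMEM;
            }

            fd_install(fd, sync_file->file);
            return fd;
    }
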
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index 796cabf..5ab972e 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -10,8 +10,9 @@
 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
 int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
 void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+			     int dev_id, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
 			   bool *audio_enabled, char *buffer, int max_bytes);
 int snd_hdac_i915_init(struct hdac_bus *bus);
 int snd_hdac_i915_exit(struct hdac_bus *bus);
@@ -29,13 +30,13 @@ static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
 {
 }
 static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
-					   hda_nid_t nid, int rate)
+					   hda_nid_t nid, int dev_id, int rate)
 {
 	return 0;
 }
 static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
-					 bool *audio_enabled, char *buffer,
-					 int max_bytes)
+					 int dev_id, bool *audio_enabled,
+					 char *buffer, int max_bytes)
 {
 	return -ENODEV;
 }
diff --git a/include/trace/events/fence.h b/include/trace/events/dma_fence.h
similarity index 72%
rename from include/trace/events/fence.h
rename to include/trace/events/dma_fence.h
index d6dfa05..1157cb4 100644
--- a/include/trace/events/fence.h
+++ b/include/trace/events/dma_fence.h
@@ -1,17 +1,17 @@
 #undef TRACE_SYSTEM
-#define TRACE_SYSTEM fence
+#define TRACE_SYSTEM dma_fence
 
-#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_FENCE_H
+#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_FENCE_H
 
 #include <linux/tracepoint.h>
 
-struct fence;
+struct dma_fence;
 
-TRACE_EVENT(fence_annotate_wait_on,
+TRACE_EVENT(dma_fence_annotate_wait_on,
 
 	/* fence: the fence waiting on f1, f1: the fence to be waited on. */
-	TP_PROTO(struct fence *fence, struct fence *f1),
+	TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
 
 	TP_ARGS(fence, f1),
 
@@ -48,9 +48,9 @@ TRACE_EVENT(fence_annotate_wait_on,
 		  __entry->waiting_context, __entry->waiting_seqno)
 );
 
-DECLARE_EVENT_CLASS(fence,
+DECLARE_EVENT_CLASS(dma_fence,
 
-	TP_PROTO(struct fence *fence),
+	TP_PROTO(struct dma_fence *fence),
 
 	TP_ARGS(fence),
 
@@ -73,56 +73,56 @@ DECLARE_EVENT_CLASS(fence,
 		  __entry->seqno)
 );
 
-DEFINE_EVENT(fence, fence_emit,
+DEFINE_EVENT(dma_fence, dma_fence_emit,
 
-	TP_PROTO(struct fence *fence),
+	TP_PROTO(struct dma_fence *fence),
 
 	TP_ARGS(fence)
 );
 
-DEFINE_EVENT(fence, fence_init,
+DEFINE_EVENT(dma_fence, dma_fence_init,
 
-	TP_PROTO(struct fence *fence),
+	TP_PROTO(struct dma_fence *fence),
 
 	TP_ARGS(fence)
 );
 
-DEFINE_EVENT(fence, fence_destroy,
+DEFINE_EVENT(dma_fence, dma_fence_destroy,
 
-	TP_PROTO(struct fence *fence),
+	TP_PROTO(struct dma_fence *fence),
 
 	TP_ARGS(fence)
 );
 
-DEFINE_EVENT(fence, fence_enable_signal,
+DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
 
-	TP_PROTO(struct fence *fence),
+	TP_PROTO(struct dma_fence *fence),
 
 	TP_ARGS(fence)
 );
 
-DEFINE_EVENT(fence, fence_signaled,
+DEFINE_EVENT(dma_fence, dma_fence_signaled,
 
-	TP_PROTO(struct fence *fence),
+	TP_PROTO(struct dma_fence *fence),
 
 	TP_ARGS(fence)
 );
 
-DEFINE_EVENT(fence, fence_wait_start,
+DEFINE_EVENT(dma_fence, dma_fence_wait_start,
 
-	TP_PROTO(struct fence *fence),
+	TP_PROTO(struct dma_fence *fence),
 
 	TP_ARGS(fence)
 );
 
-DEFINE_EVENT(fence, fence_wait_end,
+DEFINE_EVENT(dma_fence, dma_fence_wait_end,
 
-	TP_PROTO(struct fence *fence),
+	TP_PROTO(struct dma_fence *fence),
 
 	TP_ARGS(fence)
 );
 
-#endif /*  _TRACE_FENCE_H */
+#endif /*  _TRACE_DMA_FENCE_H */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index d6b5a21..4684f37 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -81,6 +81,8 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
 /* Flag that create shadow bo(GTT) while allocating vram bo */
 #define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)
+/* Flag that the BO should be placed in contiguous (linear) VRAM */
+#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS	(1 << 5)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */
@@ -436,6 +438,7 @@ struct drm_amdgpu_cs_chunk_data {
  *
  */
 #define AMDGPU_IDS_FLAGS_FUSION         0x1
+#define AMDGPU_IDS_FLAGS_PREEMPTION     0x2
 
 /* indicate if acceleration can be working */
 #define AMDGPU_INFO_ACCEL_WORKING		0x00
@@ -487,6 +490,10 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
 /* number of TTM buffer evictions */
 #define AMDGPU_INFO_NUM_EVICTIONS		0x18
+/* Query memory about VRAM and GTT domains */
+#define AMDGPU_INFO_MEMORY			0x19
+/* Query vce clock table */
+#define AMDGPU_INFO_VCE_CLOCK_TABLE		0x1A
 
 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
 #define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
@@ -572,6 +579,34 @@ struct drm_amdgpu_info_vram_gtt {
 	__u64 gtt_size;
 };
 
+struct drm_amdgpu_heap_info {
+	/** Total physical memory in this heap */
+	__u64 total_heap_size;
+
+	/** Theoretical max. available memory in the given heap */
+	__u64 usable_heap_size;
+
+	/**
+	 * Number of bytes allocated in the heap. This includes all processes
+	 * and private allocations in the kernel. It changes when new buffers
+	 * are allocated, freed, and moved. It cannot be larger than
+	 * total_heap_size.
+	 */
+	__u64 heap_usage;
+
+	/**
+	 * Theoretical maximum size of a single buffer that
+	 * could be allocated in the given heap
+	 */
+	__u64 max_allocation;
+};
+
+struct drm_amdgpu_memory_info {
+	struct drm_amdgpu_heap_info vram;
+	struct drm_amdgpu_heap_info cpu_accessible_vram;
+	struct drm_amdgpu_heap_info gtt;
+};
+
 struct drm_amdgpu_info_firmware {
 	__u32 ver;
 	__u32 feature;
@@ -645,6 +680,24 @@ struct drm_amdgpu_info_hw_ip {
 	__u32  _pad;
 };
 
+#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES		6
+
+struct drm_amdgpu_info_vce_clock_table_entry {
+	/** System clock */
+	__u32 sclk;
+	/** Memory clock */
+	__u32 mclk;
+	/** VCE clock */
+	__u32 eclk;
+	__u32 pad;
+};
+
+struct drm_amdgpu_info_vce_clock_table {
+	struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
+	__u32 num_valid_entries;
+	__u32 pad;
+};
+
 /*
  * Supported GPU families
  */
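
Both new queries go through the existing DRM_IOCTL_AMDGPU_INFO ioctl, with the
result written to a caller-supplied buffer. A minimal userspace sketch for
AMDGPU_INFO_MEMORY, assuming fd is an already-open amdgpu node and with error
handling elided (the header path depends on the libdrm installation):

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <amdgpu_drm.h>

	struct drm_amdgpu_memory_info mem = {0};
	struct drm_amdgpu_info req = {
		.return_pointer = (uintptr_t)&mem,	/* kernel writes here */
		.return_size = sizeof(mem),
		.query = AMDGPU_INFO_MEMORY,
	};

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) == 0)
		printf("VRAM: %llu / %llu bytes in use\n",
		       (unsigned long long)mem.vram.heap_usage,
		       (unsigned long long)mem.vram.total_heap_size);

AMDGPU_INFO_VCE_CLOCK_TABLE works the same way with struct
drm_amdgpu_info_vce_clock_table as the return buffer; only the first
num_valid_entries of the six table entries carry data.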
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index df0e350..084b50a 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -77,6 +77,25 @@ extern "C" {
 #define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM	(7<<14)
 #define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF	(8<<14)
 
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE		0
+#define DRM_MODE_PICTURE_ASPECT_4_3		1
+#define DRM_MODE_PICTURE_ASPECT_16_9		2
+#define DRM_MODE_PICTURE_ASPECT_64_27		3
+#define DRM_MODE_PICTURE_ASPECT_256_135		4
+
+/* Aspect ratio flag bitmask (4 bits 22:19) */
+#define DRM_MODE_FLAG_PIC_AR_MASK		(0x0F<<19)
+#define  DRM_MODE_FLAG_PIC_AR_NONE \
+			(DRM_MODE_PICTURE_ASPECT_NONE<<19)
+#define  DRM_MODE_FLAG_PIC_AR_4_3 \
+			(DRM_MODE_PICTURE_ASPECT_4_3<<19)
+#define  DRM_MODE_FLAG_PIC_AR_16_9 \
+			(DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define  DRM_MODE_FLAG_PIC_AR_64_27 \
+			(DRM_MODE_PICTURE_ASPECT_64_27<<19)
+#define  DRM_MODE_FLAG_PIC_AR_256_135 \
+			(DRM_MODE_PICTURE_ASPECT_256_135<<19)
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
@@ -92,11 +111,6 @@ extern "C" {
 #define DRM_MODE_SCALE_CENTER		2 /* Centered, no scaling */
 #define DRM_MODE_SCALE_ASPECT		3 /* Full screen, preserve aspect */
 
-/* Picture aspect ratio options */
-#define DRM_MODE_PICTURE_ASPECT_NONE	0
-#define DRM_MODE_PICTURE_ASPECT_4_3	1
-#define DRM_MODE_PICTURE_ASPECT_16_9	2
-
 /* Dithering mode options */
 #define DRM_MODE_DITHERING_OFF	0
 #define DRM_MODE_DITHERING_ON	1
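
With this move the picture aspect ratio rides along in the mode flags, packed
into bits 22:19. A sketch of encode/decode helpers built only on the macros
added above (the helper names are illustrative, not part of the UAPI):

	/* Sketch: pack/unpack the picture aspect ratio in a mode's flags. */
	static inline __u32 pic_ar_pack(__u32 flags, __u32 aspect)
	{
		flags &= ~(__u32)DRM_MODE_FLAG_PIC_AR_MASK;
		return flags | (aspect << 19);	/* aspect: DRM_MODE_PICTURE_ASPECT_* */
	}

	static inline __u32 pic_ar_unpack(__u32 flags)
	{
		return (flags & DRM_MODE_FLAG_PIC_AR_MASK) >> 19;
	}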
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index c9af022..0659bf3 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -193,6 +193,7 @@ static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid)
  * snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate
  * @codec: HDA codec
  * @nid: the pin widget NID
+ * @dev_id: device identifier
  * @rate: the sample rate to set
  *
  * This function is supposed to be used only by a HD-audio controller
@@ -201,18 +202,20 @@ static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid)
  * This function sets N/CTS value based on the given sample rate.
  * Returns zero for success, or a negative error code.
  */
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate)
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+			     int dev_id, int rate)
 {
 	struct hdac_bus *bus = codec->bus;
 	struct i915_audio_component *acomp = bus->audio_component;
-	int port;
+	int port, pipe;
 
 	if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate)
 		return -ENODEV;
 	port = pin2port(codec, nid);
 	if (port < 0)
 		return -EINVAL;
-	return acomp->ops->sync_audio_rate(acomp->dev, port, rate);
+	pipe = dev_id;
+	return acomp->ops->sync_audio_rate(acomp->dev, port, pipe, rate);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
 
@@ -220,6 +223,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
  * snd_hdac_acomp_get_eld - Get the audio state and ELD via component
  * @codec: HDA codec
  * @nid: the pin widget NID
+ * @dev_id: device identifier
  * @audio_enabled: the pointer to store the current audio state
  * @buffer: the buffer pointer to store ELD bytes
  * @max_bytes: the max bytes to be stored on @buffer
@@ -236,12 +240,12 @@ EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
  * thus it may be over @max_bytes.  If it's over @max_bytes, it implies
  * that only a part of ELD bytes have been fetched.
  */
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
 			   bool *audio_enabled, char *buffer, int max_bytes)
 {
 	struct hdac_bus *bus = codec->bus;
 	struct i915_audio_component *acomp = bus->audio_component;
-	int port;
+	int port, pipe;
 
 	if (!acomp || !acomp->ops || !acomp->ops->get_eld)
 		return -ENODEV;
@@ -249,7 +253,9 @@ int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
 	port = pin2port(codec, nid);
 	if (port < 0)
 		return -EINVAL;
-	return acomp->ops->get_eld(acomp->dev, port, audio_enabled,
+
+	pipe = dev_id;
+	return acomp->ops->get_eld(acomp->dev, port, pipe, audio_enabled,
 				   buffer, max_bytes);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_acomp_get_eld);
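
The new dev_id argument is forwarded verbatim as the pipe argument of the
component ops, and -1 from non-MST callers means "no specific device". A sketch
of what a controller-side implementation of the widened hook might look like,
under the assumption that it merely has to tolerate the new argument; the real
i915 implementation differs, and example_pipe_for_port() is hypothetical:

	/* Sketch only: a sync_audio_rate op accepting the new pipe argument. */
	static int example_sync_audio_rate(struct device *dev, int port, int pipe,
					   int rate)
	{
		/* dev_id == -1 from legacy (non-MST) callers: look up the pipe
		 * currently driving @port instead of trusting the argument. */
		if (pipe < 0)
			pipe = example_pipe_for_port(dev, port);	/* hypothetical */

		/* program N/CTS for the (port, pipe) pair at @rate here */
		return 0;
	}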
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 56e5204..cf9bc042 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1485,7 +1485,7 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 
 	mutex_lock(&per_pin->lock);
 	eld->monitor_present = false;
-	size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
+	size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid, -1,
 				      &eld->monitor_present, eld->eld_buffer,
 				      ELD_MAX_SIZE);
 	if (size > 0) {
@@ -1744,7 +1744,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
 	/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
 	/* Todo: add DP1.2 MST audio support later */
 	if (codec_has_acomp(codec))
-		snd_hdac_sync_audio_rate(&codec->core, pin_nid, runtime->rate);
+		snd_hdac_sync_audio_rate(&codec->core, pin_nid, -1,
+					 runtime->rate);
 
 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
 	mutex_lock(&per_pin->lock);
@@ -2290,7 +2291,7 @@ static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
 	snd_hda_codec_set_power_to_all(codec, fg, power_state);
 }
 
-static void intel_pin_eld_notify(void *audio_ptr, int port)
+static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
 {
 	struct hda_codec *codec = audio_ptr;
 	int pin_nid;
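
The ELD notify callback gains the pipe for the same reason; a codec driver that
does not handle MST can simply ignore it. A minimal conforming callback as a
sketch (names illustrative):

	/* Sketch: an eld_notify hook matching the widened signature. */
	static void example_eld_notify(void *audio_ptr, int port, int pipe)
	{
		struct hda_codec *codec = audio_ptr;

		/* a non-MST codec keys purely off @port and may ignore @pipe */
		codec_dbg(codec, "ELD notify: port %d, pipe %d\n", port, pipe);
	}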
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index c602c49..0c6228a 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1368,7 +1368,7 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
 	return hdac_hdmi_init_dai_map(edev);
 }
 
-static void hdac_hdmi_eld_notify_cb(void *aptr, int port)
+static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
 {
 	struct hdac_ext_device *edev = aptr;
 	struct hdac_hdmi_priv *hdmi = edev->private_data;