drm/i915: Clear the gpu_write_list on resetting write_domain upon hang

Otherwise we will hit the list handling assertion in
i915_gem_object_move_to_inactive() when moving the object to the
inactive list.

While doing so, fold the separate request list, flushing list and
inactive GPU domain cleanups into a single i915_gem_reset_lists() that
walks each ring's request and active lists in turn, clearing the write
domain and gpu_write_list of every outstanding object before retiring
it. Since the reset path now empties the request lists itself, the
wedged special case in i915_gem_retire_requests_ring() becomes dead
code and is removed.
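
For reference, the assertion in question is the gpu_write_list check
in i915_gem_object_move_to_inactive(); schematically (a sketch of the
pre-existing check, not part of this patch, with unrelated details
elided):

	static void
	i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
	{
		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
		...
		/* Pending GPU writes must have been flushed, and the
		 * object unlinked from the gpu_write_list, before the
		 * object may be retired to the inactive list.
		 */
		BUG_ON(!list_empty(&obj_priv->gpu_write_list));
		...
	}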

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4e83bb3..2184d29 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -395,21 +395,7 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	/*
-	 * Clear request list
-	 */
-	i915_gem_retire_requests(dev);
-
-	/* Remove anything from the flushing lists. The GPU cache is likely
-	 * to be lost on reset along with the data, so simply move the
-	 * lost bo to the inactive list.
-	 */
-	i915_gem_reset_flushing_list(dev);
-
-	/* Move everything out of the GPU domains to ensure we do any
-	 * necessary invalidation upon reuse.
-	 */
-	i915_gem_reset_inactive_gpu_domains(dev);
+	i915_gem_reset_lists(dev);
 
 	/*
 	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12e9f85..5fec2ca 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1005,8 +1005,7 @@
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
 				  bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_reset_flushing_list(struct drm_device *dev);
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev);
+void i915_gem_reset_lists(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 void i915_gem_flush_ring(struct drm_device *dev,
 			 struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 734cc08..0ce28c7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1682,27 +1682,60 @@
 	return ring->get_gem_seqno(dev, ring);
 }
 
-void i915_gem_reset_flushing_list(struct drm_device *dev)
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+				      struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
 
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		list_del(&request->list);
+		list_del(&request->client_list);
+		kfree(request);
+	}
+
+	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj_priv;
 
+		obj_priv = list_first_entry(&ring->active_list,
+					    struct drm_i915_gem_object,
+					    list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
+	}
+}
+
+void i915_gem_reset_lists(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+	/* Remove anything from the flushing lists. The GPU cache is likely
+	 * to be lost on reset along with the data, so simply move the
+	 * lost bo to the inactive list.
+	 */
+	while (!list_empty(&dev_priv->mm.flushing_list)) {
 		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
 					    struct drm_i915_gem_object,
 					    list);
 
 		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
 		i915_gem_object_move_to_inactive(&obj_priv->base);
 	}
-}
 
-void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-
+	/* Move everything out of the GPU domains to ensure we do any
+	 * necessary invalidation upon reuse.
+	 */
 	list_for_each_entry(obj_priv,
 			    &dev_priv->mm.inactive_list,
 			    list)
@@ -1720,15 +1753,12 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
-	bool wedged;
 
 	if (!ring->status_page.page_addr ||
 	    list_empty(&ring->request_list))
 		return;
 
 	seqno = i915_get_gem_seqno(dev, ring);
-	wedged = atomic_read(&dev_priv->mm.wedged);
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1736,7 +1766,7 @@
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (!wedged && !i915_seqno_passed(seqno, request->seqno))
+		if (!i915_seqno_passed(seqno, request->seqno))
 			break;
 
 		trace_i915_gem_request_retire(dev, request->seqno);
@@ -1757,8 +1787,7 @@
 					    struct drm_i915_gem_object,
 					    list);
 
-		if (!wedged &&
-		    !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
 			break;
 
 		obj = &obj_priv->base;