drm/i915: move flushing list processing to i915_gem_flush

Now that we can move objects to the active list without already having
emitted a request, move the flushing list handling into i915_gem_flush.
This makes more sense and allows us to drop a few i915_add_request calls
that are not strictly necessary.
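
As an illustration of the caller-side effect (a sketch only, condensed
from the execbuffer hunk below; "ring" stands for either the render or
the bsd ring):

	/* Before: the request had to repeat the flushed domains so that
	 * i915_add_request would walk the flushing list.
	 */
	i915_gem_flush(dev, invalidate_domains, flush_domains);
	(void)i915_add_request(dev, file_priv, flush_domains, ring);

	/* After: i915_gem_flush walks the flushing list itself, using a
	 * seqno of 0 as a placeholder for the next request to be emitted,
	 * so callers pass 0 for the flush domains.
	 */
	i915_gem_flush(dev, invalidate_domains, flush_domains);
	(void)i915_add_request(dev, file_priv, 0, ring);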

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1fed3e6..eea8232 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1637,7 +1637,7 @@
 	}
 
 	/* Associate any objects on the flushing list matching the write
-	 * domain we're flushing with our flush.
+	 * domain we're flushing with our request.
 	 */
 	if (flush_domains != 0)
 		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
@@ -1887,8 +1887,9 @@
 		ret = -EIO;
 
 	if (ret && ret != -ERESTARTSYS)
-		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring),
+			  dev_priv->next_seqno);
 
 	/* Directly dispatch request retiring.  While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1918,8 +1919,10 @@
 	       uint32_t flush_domains)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		drm_agp_chipset_flush(dev);
+
 	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
 			invalidate_domains,
 			flush_domains);
@@ -1928,6 +1931,17 @@
 		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
 				invalidate_domains,
 				flush_domains);
+
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with the next request.
+	 */
+	if (flush_domains != 0) {
+		i915_gem_process_flushing_list(dev, flush_domains, 0,
+					       &dev_priv->render_ring);
+		if (HAS_BSD(dev))
+			i915_gem_process_flushing_list(dev, flush_domains, 0,
+						       &dev_priv->bsd_ring);
+	}
 }
 
 /**
@@ -2061,14 +2075,14 @@
 
 	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+	seqno1 = i915_add_request(dev, NULL, 0,
 			&dev_priv->render_ring);
 	if (seqno1 == 0)
 		return -ENOMEM;
 	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
 
 	if (HAS_BSD(dev)) {
-		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+		seqno2 = i915_add_request(dev, NULL, 0,
 				&dev_priv->bsd_ring);
 		if (seqno2 == 0)
 			return -ENOMEM;
@@ -2078,7 +2092,6 @@
 			return ret;
 	}
 
-
 	return ret;
 }
 
@@ -3771,12 +3784,10 @@
 			       dev->invalidate_domains,
 			       dev->flush_domains);
 		if (dev_priv->flush_rings & FLUSH_RENDER_RING)
-			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains,
+			(void)i915_add_request(dev, file_priv, 0,
 					       &dev_priv->render_ring);
 		if (dev_priv->flush_rings & FLUSH_BSD_RING)
-			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains,
+			(void)i915_add_request(dev, file_priv, 0,
 					       &dev_priv->bsd_ring);
 	}