drm/i915: add an argument to control retiring behavior

Sometimes when we idle the GPU, or wait on a seqno, we don't actually
want to process the retirement list. Add a do_retire argument so that
callers can choose the behavior.
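
For example (illustrative sketch, not part of this patch), a caller
that only needs the rings to drain and wants to leave the retire work
for later could do:

	/* idle every ring, but skip processing the request lists */
	ret = i915_gpu_idle(dev, false);

All existing callers in this patch pass true, keeping the current
behavior unchanged.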

Reviewed-by: Keith Packard <keithp@keithp.com>
Reviewed-by: Eugeni Dodonov <eugeni.dodonov@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index eb98a7f..ff3066c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1943,7 +1943,8 @@
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno)
+		  uint32_t seqno,
+		  bool do_retire)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
@@ -2027,7 +2028,7 @@
 	 * buffer to have made it to the inactive list, and we would need
 	 * a separate wait queue to handle that.
 	 */
-	if (ret == 0)
+	if (ret == 0 && do_retire)
 		i915_gem_retire_requests_ring(ring);
 
 	return ret;
@@ -2051,7 +2052,8 @@
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+					true);
 		if (ret)
 			return ret;
 	}
@@ -2172,7 +2174,7 @@
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 {
 	int ret;
 
@@ -2186,18 +2188,18 @@
 			return ret;
 	}
 
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+				 do_retire);
 }
 
-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gpu_idle(struct drm_device *dev, bool do_retire)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i]);
+		ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
 		if (ret)
 			return ret;
 	}
@@ -2400,7 +2402,8 @@
 		if (!ring_passed_seqno(obj->last_fenced_ring,
 				       obj->last_fenced_seqno)) {
 			ret = i915_wait_request(obj->last_fenced_ring,
-						obj->last_fenced_seqno);
+						obj->last_fenced_seqno,
+						true);
 			if (ret)
 				return ret;
 		}
@@ -2541,7 +2544,8 @@
 				if (!ring_passed_seqno(obj->last_fenced_ring,
 						       reg->setup_seqno)) {
 					ret = i915_wait_request(obj->last_fenced_ring,
-								reg->setup_seqno);
+								reg->setup_seqno,
+								true);
 					if (ret)
 						return ret;
 				}
@@ -3710,7 +3714,7 @@
 		return 0;
 	}
 
-	ret = i915_gpu_idle(dev);
+	ret = i915_gpu_idle(dev, true);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -4201,7 +4205,7 @@
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		if (i915_gpu_idle(dev) == 0)
+		if (i915_gpu_idle(dev, true) == 0)
 			goto rescan;
 	}
 	mutex_unlock(&dev->struct_mutex);