drm/i915: Spin after waking up for an interrupt
When waiting for an interrupt (waiting for the engine to complete some
work), we know we are the only waiter to be woken on this engine. We also
know when the GPU has nearly completed our request (or at least started
processing it), so after being woken, if we detect that the GPU is
active and working on our request, allow the bottom-half (the first
waiter who wakes up to handle checking the seqno after the interrupt) to
spin for a very short while to reduce client latencies.
The impact is minimal: there was an improvement in the realtime-vs-many
clients case, but exporting the function proves useful later. However,
it is tempting to adjust irq_seqno_barrier to include the spin. The
problem is first ensuring that the "start-of-request" seqno is coherent
as we use that as our basis for judging when it is ok to spin. If we
could, spinning there would dramatically shorten some sleeps and allow
us to make the barriers more conservative to handle missed seqno writes
on more platforms (all gen7+ are known to have the occasional issue, at
least).
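
For illustration, the waiting strategy this patch arrives at has
roughly the following shape (a simplified sketch only: signal, timeout
and reset handling are elided, and enable_irq_for()/disable_irq_for()
are hypothetical stand-ins for the real engine irq get/put plumbing):

	/* Optimistic spin for the next ~jiffie before touching IRQs */
	if (i915_spin_request(req, state, 5))
		return 0;

	enable_irq_for(req->engine);	/* hypothetical */
	for (;;) {
		set_current_state(state);
		io_schedule();

		if (__i915_request_irq_complete(req))
			break;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}
	__set_current_state(TASK_RUNNING);
	disable_irq_for(req->engine);	/* hypothetical */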
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1467390209-3576-7-git-send-email-chris@chris-wilson.co.uk
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f522a36..309bb2f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -663,7 +663,7 @@
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
engine->get_seqno(engine),
- i915_gem_request_completed(work->flip_queued_req, true));
+ i915_gem_request_completed(work->flip_queued_req));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1fefa8c..0ea69c5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3287,24 +3287,27 @@
return (int32_t)(seq1 - seq2) >= 0;
}
-static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
- bool lazy_coherency)
+static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
{
- if (!lazy_coherency && req->engine->irq_seqno_barrier)
- req->engine->irq_seqno_barrier(req->engine);
return i915_seqno_passed(req->engine->get_seqno(req->engine),
req->previous_seqno);
}
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
- bool lazy_coherency)
+static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
- if (!lazy_coherency && req->engine->irq_seqno_barrier)
- req->engine->irq_seqno_barrier(req->engine);
return i915_seqno_passed(req->engine->get_seqno(req->engine),
req->seqno);
}
+bool __i915_spin_request(const struct drm_i915_gem_request *request,
+ int state, unsigned long timeout_us);
+static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
+ int state, unsigned long timeout_us)
+{
+ return (i915_gem_request_started(request) &&
+ __i915_spin_request(request, state, timeout_us));
+}
+
int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
@@ -3983,6 +3986,8 @@
static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
{
+ struct intel_engine_cs *engine = req->engine;
+
/* Ensure our read of the seqno is coherent so that we
* do not "miss an interrupt" (i.e. if this is the last
* request and the seqno write from the GPU is not visible
@@ -3994,7 +3999,10 @@
* but it is easier and safer to do it every time the waiter
* is woken.
*/
- if (i915_gem_request_completed(req, false))
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ if (i915_gem_request_completed(req))
return true;
/* We need to check whether any gpu reset happened in between
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c981457..2aef737 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1375,9 +1375,9 @@
return this_cpu != cpu;
}
-static bool __i915_spin_request(struct drm_i915_gem_request *req, int state)
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+ int state, unsigned long timeout_us)
{
- unsigned long timeout;
unsigned cpu;
/* When waiting for high frequency requests, e.g. during synchronous
@@ -1390,19 +1390,15 @@
* takes to sleep on a request, on the order of a microsecond.
*/
- /* Only spin if we know the GPU is processing this request */
- if (!i915_gem_request_started(req, true))
- return false;
-
- timeout = local_clock_us(&cpu) + 5;
+ timeout_us += local_clock_us(&cpu);
do {
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(req))
return true;
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout, cpu))
+ if (busywait_stop(timeout_us, cpu))
break;
cpu_relax_lowlatency();
@@ -1445,7 +1441,7 @@
if (list_empty(&req->list))
return 0;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(req))
return 0;
timeout_remain = MAX_SCHEDULE_TIMEOUT;
@@ -1470,7 +1466,7 @@
gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
/* Optimistic spin for the next ~jiffie before touching IRQs */
- if (__i915_spin_request(req, state))
+ if (i915_spin_request(req, state, 5))
goto complete;
set_current_state(state);
@@ -1518,6 +1514,10 @@
*/
if (__i915_request_irq_complete(req))
break;
+
+ /* Only spin if we know the GPU is processing this request */
+ if (i915_spin_request(req, state, 2))
+ break;
}
remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
@@ -3055,8 +3055,16 @@
{
struct drm_i915_gem_request *request;
+ /* We are called by the error capture and reset at a random
+ * point in time. In particular, note that neither is crucially
+ * ordered with an interrupt. After a hang, the GPU is dead and we
+ * assume that no more writes can happen (we waited long enough for
+ * all writes that were in transit to be flushed) - adding an
+ * extra delay for a recent interrupt is pointless. Hence, we do
+ * not need an engine->irq_seqno_barrier() before the seqno reads.
+ */
list_for_each_entry(request, &engine->request_list, list) {
- if (i915_gem_request_completed(request, false))
+ if (i915_gem_request_completed(request))
continue;
return request;
@@ -3188,7 +3196,7 @@
struct drm_i915_gem_request,
list);
- if (!i915_gem_request_completed(request, true))
+ if (!i915_gem_request_completed(request))
break;
i915_gem_request_retire(request);
@@ -3212,7 +3220,7 @@
}
if (unlikely(engine->trace_irq_req &&
- i915_gem_request_completed(engine->trace_irq_req, true))) {
+ i915_gem_request_completed(engine->trace_irq_req))) {
engine->irq_put(engine);
i915_gem_request_assign(&engine->trace_irq_req, NULL);
}
@@ -3310,7 +3318,7 @@
if (req == NULL)
continue;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(req))
i915_gem_object_retire__read(obj, i);
}
@@ -3418,7 +3426,7 @@
if (to == from)
return 0;
- if (i915_gem_request_completed(from_req, true))
+ if (i915_gem_request_completed(from_req))
return 0;
if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 30c181a..88e899b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11628,7 +11628,7 @@
vblank = intel_crtc_get_vblank_counter(intel_crtc);
if (work->flip_ready_vblank == 0) {
if (work->flip_queued_req &&
- !i915_gem_request_completed(work->flip_queued_req, true))
+ !i915_gem_request_completed(work->flip_queued_req))
return false;
work->flip_ready_vblank = vblank;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index cfe850f..82c2efd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7767,7 +7767,7 @@
struct request_boost *boost = container_of(work, struct request_boost, work);
struct drm_i915_gem_request *req = boost->req;
- if (!i915_gem_request_completed(req, true))
+ if (!i915_gem_request_completed(req))
gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
i915_gem_request_unreference(req);
@@ -7781,7 +7781,7 @@
if (req == NULL || INTEL_GEN(req->i915) < 6)
return;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(req))
return;
boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
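
For reference, the bounded spin relies on two small helpers that
already live in i915_gem.c and are untouched by this patch; from memory
they look roughly like the sketch below. local_clock() reports
nanoseconds, so the shift by 10 cheaply approximates microseconds, and
the spin also aborts if the waiter migrates to another CPU, since
local_clock() is only comparable on the CPU it was sampled on:

	static unsigned long local_clock_us(unsigned *cpu)
	{
		unsigned long t;

		/* Record which CPU we sample on; local_clock() is only
		 * monotonic per-cpu, so a later comparison is only valid
		 * if we are still running on the same CPU.
		 */
		local_irq_disable();
		*cpu = smp_processor_id();
		t = local_clock() >> 10; /* ns -> ~us (truncating /1024) */
		local_irq_enable();

		return t;
	}

	static bool busywait_stop(unsigned long timeout, unsigned cpu)
	{
		unsigned this_cpu;

		/* Stop spinning once the deadline passes, or if we have
		 * been preempted onto a different CPU (a hint of system
		 * load, and our clock comparison is no longer valid).
		 */
		if (time_after(local_clock_us(&this_cpu), timeout))
			return true;

		return this_cpu != cpu;
	}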