/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

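/* Returns a mask describing the waiter state: ENGINE_WAKEUP_WAITER is set
 * when a waiter is registered on the engine, and ENGINE_WAKEUP_ACTIVE is
 * also set when that waiter was already runnable, i.e. wake_up_process()
 * reports that it did not need to wake the task.
 */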
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
        unsigned int result = 0;

        /* Note that for this not to dangerously chase a dangling pointer,
         * we must hold the rcu_read_lock here.
         *
         * Also note that tsk is likely to be in !TASK_RUNNING state so an
         * early test for tsk->state != TASK_RUNNING before wake_up_process()
         * is unlikely to be beneficial.
         */
        if (intel_engine_has_waiter(engine)) {
                struct task_struct *tsk;

                result = ENGINE_WAKEUP_WAITER;

                rcu_read_lock();
                tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
                if (tsk && !wake_up_process(tsk))
                        result |= ENGINE_WAKEUP_ACTIVE;
                rcu_read_unlock();
        }

        return result;
}

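/* Round the hangcheck deadline up to the next second boundary;
 * round_jiffies_up() lets the per-engine timers coalesce onto the same
 * jiffy so that an otherwise idle system is woken less often.
 */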
static unsigned long wait_timeout(void)
{
        return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static void intel_breadcrumbs_hangcheck(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!b->irq_enabled)
                return;

        if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
                b->hangcheck_interrupts = atomic_read(&engine->irq_count);
                mod_timer(&b->hangcheck, wait_timeout());
                return;
        }

        /* If the waiter was currently running, assume it hasn't had a chance
         * to process the pending interrupt (e.g., a low priority task on a
         * loaded system) and wait until it sleeps before declaring a missed
         * interrupt.
         */
        if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ACTIVE) {
                mod_timer(&b->hangcheck, wait_timeout());
                return;
        }

        DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
        set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
        mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);

        /* Ensure that even if the GPU hangs, we get woken up.
         *
         * However, note that if no one is waiting, we never notice
         * a GPU hang. Eventually, we will have to wait for a resource
         * held by the GPU and so trigger a hangcheck. In the most
         * pathological case, this will be upon memory starvation! To
         * prevent this, we also queue the hangcheck from the retire
         * worker.
         */
        i915_queue_hangcheck(engine->i915);
}

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

        /*
         * The timer persists in case we cannot enable interrupts,
         * or if we have previously seen seqno/interrupt incoherency
         * ("missed interrupt" syndrome). Here the timer will wake up
         * every jiffy in order to kick the oldest waiter to do the
         * coherent seqno check.
         */
        if (intel_engine_wakeup(engine))
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
        /* Enabling the IRQ may miss the generation of the interrupt, but
         * we still need to force the barrier before reading the seqno,
         * just in case.
         */
        set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock(&engine->i915->irq_lock);
}

static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
        const struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);

        if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
                return false;

        /* Only start with the heavyweight fake irq timer if we have not
         * seen any interrupts since enabling it the first time. If the
         * interrupts are still arriving, it means we made a mistake in our
         * engine->seqno_barrier(), a timing error that should be transient
         * and unlikely to recur.
         */
        return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}

static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
        struct drm_i915_private *i915 = engine->i915;

        assert_spin_locked(&b->lock);
        if (b->rpm_wakelock)
                return;

        if (I915_SELFTEST_ONLY(b->mock)) {
                /* For our mock objects we want to avoid interaction
                 * with the real hardware (which is not set up). So
                 * we simply pretend we have enabled the powerwell
                 * and the irq, and leave it up to the mock
                 * implementation to call intel_engine_wakeup()
                 * itself when it wants to simulate a user interrupt.
                 */
                b->rpm_wakelock = true;
                return;
        }

        /* Since we are waiting on a request, the GPU should be busy
         * and should have its own rpm reference. For completeness,
         * record an rpm reference for ourselves to cover the
         * interrupt we unmask.
         */
        intel_runtime_pm_get_noresume(i915);
        b->rpm_wakelock = true;

        /* No interrupts? Kick the waiter every jiffy! */
        if (intel_irqs_enabled(i915)) {
                if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
                        irq_enable(engine);
                b->irq_enabled = true;
        }

        if (!b->irq_enabled || use_fake_irq(b)) {
                mod_timer(&b->fake_irq, jiffies + 1);
                i915_queue_hangcheck(i915);
        } else {
                /* Ensure we never sleep indefinitely */
                mod_timer(&b->hangcheck, wait_timeout());
        }
}

static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);

        assert_spin_locked(&b->lock);
        if (!b->rpm_wakelock)
                return;

        if (I915_SELFTEST_ONLY(b->mock)) {
                b->rpm_wakelock = false;
                return;
        }

        if (b->irq_enabled) {
                irq_disable(engine);
                b->irq_enabled = false;
        }

        intel_runtime_pm_put(engine->i915);
        b->rpm_wakelock = false;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
        return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
                                              struct intel_wait *wait)
{
        assert_spin_locked(&b->lock);

        /* This request is completed, so remove it from the tree, mark it as
         * complete, and *then* wake up the associated task.
         */
        rb_erase(&wait->node, &b->waiters);
        RB_CLEAR_NODE(&wait->node);

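        /* The barrier implied by wake_up_process() pairs with the unlocked
         * RB_EMPTY_NODE() check in intel_engine_remove_wait(): the cleared
         * node must be visible before the woken task can run and recheck it.
         */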
        wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                                    struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node **p, *parent, *completed;
        bool first;
        u32 seqno;

        /* Insert the request into the retirement ordered list
         * of waiters by walking the rbtree. If we are the oldest
         * seqno in the tree (the first to be retired), then
         * set ourselves as the bottom-half.
         *
         * As we descend the tree, prune completed branches: since we hold
         * the spinlock, we know that the first_waiter must be delayed, and
         * we can reduce some of the sequential wake-up latency by taking
         * action ourselves and waking up the completed tasks in parallel.
         * Also, by removing stale elements in the tree, we may be able to
         * reduce the ping-pong between the old bottom-half and ourselves
         * as first-waiter.
         */
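        /* An illustrative walk (seqnos invented for the sketch): with
         * waiters for seqnos 8 and 10 in the tree and the engine's seqno
         * at 9, a new waiter for 12 descends to the right of both, noting
         * 8 as completed on the way down (it is finished below), while the
         * still-pending 10 means we are not first and do not become the
         * bottom-half.
         */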
        first = true;
        parent = NULL;
        completed = NULL;
        seqno = intel_engine_get_seqno(engine);

        /* If the request completed before we managed to grab the spinlock,
         * return now before adding ourselves to the rbtree. We let the
         * current bottom-half handle any pending wakeups and instead
         * try to get out of the way quickly.
         */
        if (i915_seqno_passed(seqno, wait->seqno)) {
                RB_CLEAR_NODE(&wait->node);
                return first;
        }

        p = &b->waiters.rb_node;
        while (*p) {
                parent = *p;
                if (wait->seqno == to_wait(parent)->seqno) {
                        /* We have multiple waiters on the same seqno, select
                         * the highest priority task (that with the smallest
                         * task->prio) to serve as the bottom-half for this
                         * group.
                         */
                        if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
                                p = &parent->rb_right;
                                first = false;
                        } else {
                                p = &parent->rb_left;
                        }
                } else if (i915_seqno_passed(wait->seqno,
                                             to_wait(parent)->seqno)) {
                        p = &parent->rb_right;
                        if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
                                completed = parent;
                        else
                                first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);
        GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));

        if (completed) {
                struct rb_node *next = rb_next(completed);

                GEM_BUG_ON(!next && !first);
                if (next && next != &wait->node) {
                        GEM_BUG_ON(first);
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        /* As there is a delay between reading the current
                         * seqno, processing the completed tasks and selecting
                         * the next waiter, we may have missed the interrupt
                         * and so need the next bottom-half to wake up.
                         *
                         * Also as we enable the IRQ, we may miss the
                         * interrupt for that seqno, so we have to wake up
                         * the next bottom-half in order to do a coherent check
                         * in case the seqno passed.
                         */
                        __intel_breadcrumbs_enable_irq(b);
                        if (test_bit(ENGINE_IRQ_BREADCRUMB,
                                     &engine->irq_posted))
                                wake_up_process(to_wait(next)->tsk);
                }

                do {
                        struct intel_wait *crumb = to_wait(completed);
                        completed = rb_prev(completed);
                        __intel_breadcrumbs_finish(b, crumb);
                } while (completed);
        }

        if (first) {
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
                b->first_wait = wait;
                rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 * or if there was a previous waiter (for a later seqno) they
                 * may be woken instead of us (due to the inherent race
                 * in the unlocked read of b->irq_seqno_bh in the irq handler)
                 * and so we miss the wake up.
                 */
                __intel_breadcrumbs_enable_irq(b);
        }
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
        GEM_BUG_ON(!b->first_wait);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

        return first;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool first;

        spin_lock_irq(&b->lock);
        first = __intel_engine_add_wait(engine, wait);
        spin_unlock_irq(&b->lock);

        return first;
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
        return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
                                  struct task_struct *tsk)
{
        if (tsk == b->signaler)
                return INT_MIN;
        else
                return tsk->prio;
}

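/* A note on the helpers above: a lower task->prio means a more important
 * task, and wakeup_priority() reports INT_MIN for the signaler thread.
 * chain_wakeup() is therefore never true when the departing bottom-half
 * is the RT signaler, so it hands over to the next waiter immediately
 * rather than spending its own time waking a herd of completed waiters.
 */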
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
                                       struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        assert_spin_locked(&b->lock);

        if (RB_EMPTY_NODE(&wait->node))
                goto out;

        if (b->first_wait == wait) {
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;

                GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                 * request. As multiple seqnos may complete in the time it
                 * takes us to wake up and find the next waiter, we have to
                 * wake up that waiter for it to perform its own coherent
                 * completion check.
                 */
                next = rb_next(&wait->node);
                if (chain_wakeup(next, priority)) {
                        /* If the next waiter is already complete,
                         * wake it up and continue onto the next waiter. So
                         * if we have a small herd, they will wake up in
                         * parallel rather than sequentially, which should
                         * reduce the overall latency in waking all the
                         * completed clients.
                         *
                         * However, waking up a chain adds extra latency to
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
                        u32 seqno = intel_engine_get_seqno(engine);

                        while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);

                                __intel_breadcrumbs_finish(b, to_wait(next));
                                next = n;
                                if (!chain_wakeup(next, priority))
                                        break;
                        }
                }

                if (next) {
                        /* In our haste, we may have completed the first waiter
                         * before we enabled the interrupt. Do so now as we
                         * have a second waiter for a future seqno. Afterwards,
                         * we have to wake up that waiter in case we missed
                         * the interrupt, or if we have to handle an
                         * exception rather than a seqno completion.
                         */
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        if (b->first_wait->seqno != wait->seqno)
                                __intel_breadcrumbs_enable_irq(b);
                        wake_up_process(b->first_wait->tsk);
                } else {
                        b->first_wait = NULL;
                        rcu_assign_pointer(b->irq_seqno_bh, NULL);
                        __intel_breadcrumbs_disable_irq(b);
                }
        } else {
                GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
        }

        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);

out:
        GEM_BUG_ON(b->first_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->first_wait ? &b->first_wait->node : NULL));
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* Quick check to see if this waiter was already decoupled from
         * the tree by the bottom-half to avoid contention on the spinlock
         * by the herd.
         */
        if (RB_EMPTY_NODE(&wait->node))
                return;

        spin_lock_irq(&b->lock);
        __intel_engine_remove_wait(engine, wait);
        spin_unlock_irq(&b->lock);
}

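/* A request's global seqno may be revoked or reused while the signaler
 * sleeps, so a wait that appears complete is only trusted after checking
 * that it still matches the request it was installed for.
 */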
static bool signal_valid(const struct drm_i915_gem_request *request)
{
        return intel_wait_check_request(&request->signaling.wait, request);
}

static bool signal_complete(const struct drm_i915_gem_request *request)
{
        if (!request)
                return false;

        /* If another process served as the bottom-half it may have already
         * signalled that this wait is completed.
         */
        if (intel_wait_complete(&request->signaling.wait))
                return signal_valid(request);

        /* Carefully check if the request is complete, giving time for the
         * seqno to be visible or if the GPU hung.
         */
        if (__i915_request_irq_complete(request))
                return true;

        return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
        return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}

static void signaler_set_rtpriority(void)
{
        struct sched_param param = { .sched_priority = 1 };

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

static int intel_breadcrumbs_signaler(void *arg)
{
        struct intel_engine_cs *engine = arg;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_gem_request *request;

        /* Install ourselves with high priority to reduce signalling latency */
        signaler_set_rtpriority();

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                /* We are either woken up by the interrupt bottom-half,
                 * or by a client adding a new signaller. In both cases,
                 * the GPU seqno may have advanced beyond our oldest signal.
                 * If it has, propagate the signal, remove the waiter and
                 * check again with the next oldest signal. Otherwise we
                 * need to wait for a new interrupt from the GPU or for
                 * a new client.
                 */
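                /* b->first_signal is published under b->lock, but here we
                 * read it locklessly: take the RCU read lock and convert
                 * the pointer into a full reference before use, as the
                 * request may otherwise be retired and freed once we leave
                 * the RCU read-side critical section.
                 */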
                rcu_read_lock();
                request = rcu_dereference(b->first_signal);
                if (request)
                        request = i915_gem_request_get_rcu(request);
                rcu_read_unlock();
                if (signal_complete(request)) {
                        local_bh_disable();
                        dma_fence_signal(&request->fence);
                        local_bh_enable(); /* kick start the tasklets */

                        spin_lock_irq(&b->lock);

                        /* Wake up all other completed waiters and select the
                         * next bottom-half for the next user interrupt.
                         */
                        __intel_engine_remove_wait(engine,
                                                   &request->signaling.wait);

                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
                         * we just completed - so double check we are still
                         * the oldest before picking the next one.
                         */
                        if (request == rcu_access_pointer(b->first_signal)) {
                                struct rb_node *rb =
                                        rb_next(&request->signaling.node);
                                rcu_assign_pointer(b->first_signal,
                                                   rb ? to_signaler(rb) : NULL);
                        }
                        rb_erase(&request->signaling.node, &b->signals);
                        RB_CLEAR_NODE(&request->signaling.node);

                        spin_unlock_irq(&b->lock);

                        i915_gem_request_put(request);
                } else {
                        DEFINE_WAIT(exec);

                        if (kthread_should_stop()) {
                                GEM_BUG_ON(request);
                                break;
                        }

                        if (request)
                                add_wait_queue(&request->execute, &exec);

                        schedule();

                        if (request)
                                remove_wait_queue(&request->execute, &exec);

                        if (kthread_should_park())
                                kthread_parkme();
                }
                i915_gem_request_put(request);
        } while (1);
        __set_current_state(TASK_RUNNING);

        return 0;
}

void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *parent, **p;
        bool first, wakeup;
        u32 seqno;

        /* Note that we may be called from an interrupt handler on another
         * device (e.g. nouveau signaling a fence completion causing us
         * to submit a request, and so enable signaling). As such,
         * we need to make sure that all other users of b->lock protect
         * against interrupts, i.e. use spin_lock_irqsave.
         */

        /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
        assert_spin_locked(&request->lock);

        seqno = i915_gem_request_global_seqno(request);
        if (!seqno)
                return;

        request->signaling.wait.tsk = b->signaler;
        request->signaling.wait.seqno = seqno;
        i915_gem_request_get(request);

        spin_lock(&b->lock);

        /* First add ourselves into the list of waiters, but register our
         * bottom-half as the signaller thread. As per usual, only the oldest
         * waiter (not just signaller) is tasked as the bottom-half waking
         * up all completed waiters after the user interrupt.
         *
         * If we are the oldest waiter, enable the irq (after which we
         * must double check that the seqno did not complete).
         */
        wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

        /* Now insert ourselves into the retirement ordered list of signals
         * on this engine. We track the oldest seqno as that will be the
         * first signal to complete.
         */
        parent = NULL;
        first = true;
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
                if (i915_seqno_passed(seqno,
                                      to_signaler(parent)->signaling.wait.seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&request->signaling.node, parent, p);
        rb_insert_color(&request->signaling.node, &b->signals);
        if (first)
                rcu_assign_pointer(b->first_signal, request);

        spin_unlock(&b->lock);

        if (wakeup)
                wake_up_process(b->signaler);
}

void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        assert_spin_locked(&request->lock);
        GEM_BUG_ON(!request->signaling.wait.seqno);

        spin_lock(&b->lock);

        if (!RB_EMPTY_NODE(&request->signaling.node)) {
                if (request == rcu_access_pointer(b->first_signal)) {
                        struct rb_node *rb =
                                rb_next(&request->signaling.node);
                        rcu_assign_pointer(b->first_signal,
                                           rb ? to_signaler(rb) : NULL);
                }
                rb_erase(&request->signaling.node, &b->signals);
                RB_CLEAR_NODE(&request->signaling.node);
                i915_gem_request_put(request);
        }

        __intel_engine_remove_wait(engine, &request->signaling.wait);

        spin_unlock(&b->lock);

        request->signaling.wait.seqno = 0;
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct task_struct *tsk;

        spin_lock_init(&b->lock);
        setup_timer(&b->fake_irq,
                    intel_breadcrumbs_fake_irq,
                    (unsigned long)engine);
        setup_timer(&b->hangcheck,
                    intel_breadcrumbs_hangcheck,
                    (unsigned long)engine);

        /* Spawn a thread to provide a common bottom-half for all signals.
         * As this is an asynchronous interface we cannot steal the current
         * task for handling the bottom-half to the user interrupt, therefore
         * we create a thread to do the coherent seqno dance after the
         * interrupt and then signal the waitqueue (via the dma-buf/fence).
         */
        tsk = kthread_run(intel_breadcrumbs_signaler, engine,
                          "i915/signal:%d", engine->id);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        b->signaler = tsk;

        return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        del_timer_sync(&b->hangcheck);
        del_timer_sync(&b->fake_irq);
        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        cancel_fake_irq(engine);
        spin_lock_irq(&b->lock);

        __intel_breadcrumbs_disable_irq(b);
        if (intel_engine_has_waiter(engine)) {
                __intel_breadcrumbs_enable_irq(b);
                if (test_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted))
                        wake_up_process(b->first_wait->tsk);
        } else {
                /* sanitize the IMR and unmask any auxiliary interrupts */
                irq_disable(engine);
        }

        spin_unlock_irq(&b->lock);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* The engines should be idle and all requests accounted for! */
        WARN_ON(READ_ONCE(b->first_wait));
        WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
        WARN_ON(rcu_access_pointer(b->first_signal));
        WARN_ON(!RB_EMPTY_ROOT(&b->signals));

        if (!IS_ERR_OR_NULL(b->signaler))
                kthread_stop(b->signaler);

        cancel_fake_irq(engine);
}

bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool busy = false;

        spin_lock_irq(&b->lock);

        if (b->first_wait) {
                wake_up_process(b->first_wait->tsk);
                busy |= intel_engine_flag(engine);
        }

        if (rcu_access_pointer(b->first_signal)) {
                wake_up_process(b->signaler);
                busy |= intel_engine_flag(engine);
        }

        spin_unlock_irq(&b->lock);

        return busy;
}

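/* The selftests are compiled into this translation unit so that they can
 * exercise the static helpers above directly.
 */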
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif