/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_enabled)
		return;

	if (time_before(jiffies, b->timeout)) {
		mod_timer(&b->hangcheck, b->timeout);
		return;
	}

	DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a GPU hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}
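
/*
 * Note the feedback loop here: once hangcheck fires while interrupts were
 * nominally enabled, the engine is flagged in missed_irq_rings, and from
 * then on __intel_breadcrumbs_enable_irq() below falls back to polling via
 * the fake_irq timer rather than trusting the user interrupt.
 */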

static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
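
/*
 * round_jiffies_up() coalesces the expiry onto the next whole-second tick,
 * so otherwise-idle hangcheck timers across engines tend to fire together
 * and cost at most one wakeup per second between them.
 */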

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffy in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */
	if (intel_engine_wakeup(engine))
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}
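
/*
 * intel_engine_wakeup() reports whether there was a waiter to kick, so the
 * timer only rearms itself for as long as somebody is actually waiting;
 * once the waiter tree drains, the polling stops of its own accord.
 */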

static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	engine->breadcrumbs.irq_posted = true;

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);

	engine->breadcrumbs.irq_posted = false;
}

static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	if (b->rpm_wakelock)
		return;

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. For completeness,
	 * record an rpm reference for ourselves to cover the
	 * interrupt we unmask.
	 */
	intel_runtime_pm_get_noresume(i915);
	b->rpm_wakelock = true;

	/* No interrupts? Kick the waiter every jiffy! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	if (!b->irq_enabled ||
	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
		mod_timer(&b->fake_irq, jiffies + 1);
		i915_queue_hangcheck(i915);
	} else {
		/* Ensure we never sleep indefinitely */
		GEM_BUG_ON(!time_after(b->timeout, jiffies));
		mod_timer(&b->hangcheck, b->timeout);
	}
}

static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	assert_spin_locked(&b->lock);
	if (!b->rpm_wakelock)
		return;

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	intel_runtime_pm_put(engine->i915);
	b->rpm_wakelock = false;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
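
/*
 * The ordering above matters: intel_wait_complete() checks
 * RB_EMPTY_NODE(&wait->node), so the node must be cleared before the task
 * is woken, and the write barrier implied by wake_up_process() makes the
 * cleared node visible to the waiter before it runs.
 */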

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first_waiter must be delayed, and
	 * we can reduce some of the sequential wake-up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			b->timeout = wait_timeout();
			b->first_wait = to_wait(next);
			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
			/* As there is a delay between reading the current
			 * seqno, processing the completed tasks and selecting
			 * the next waiter, we may have missed the interrupt
			 * and so need the next bottom-half to wake up.
			 *
			 * Also as we enable the IRQ, we may miss the
			 * interrupt for that seqno, so we have to wake up
			 * the next bottom-half in order to do a coherent check
			 * in case the seqno passed.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (READ_ONCE(b->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->timeout = wait_timeout();
		b->first_wait = wait;
		rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock_irq(&b->lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->lock);

	return first;
}
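
/*
 * A minimal sketch of the expected calling pattern, for illustration only
 * (the real user is the request wait loop elsewhere in the driver;
 * intel_wait_init() is assumed to set wait->seqno and clear the node):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	if (intel_engine_add_wait(engine, &wait))
 *		;	// we are the bottom-half responsible for wakeups
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *			break;
 *		schedule();	// woken by the irq bottom-half chain
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */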

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}
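
/*
 * Treating the signaler as INT_MIN (better than any possible task->prio)
 * means chain_wakeup() never lets the departing signaler spend time waking
 * a herd of completed waiters on its way out; it services every fence on
 * the engine, so its own wake-up latency is kept to a minimum.
 */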

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock_irq(&b->lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out_unlock;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next) {
			/* In our haste, we may have completed the first waiter
			 * before we enabled the interrupt. Do so now as we
			 * have a second waiter for a future seqno. Afterwards,
			 * we have to wake up that waiter in case we missed
			 * the interrupt, or if we have to handle an
			 * exception rather than a seqno completion.
			 */
			b->timeout = wait_timeout();
			b->first_wait = to_wait(next);
			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->first_wait->tsk);
		} else {
			b->first_wait = NULL;
			rcu_assign_pointer(b->irq_seqno_bh, NULL);
			__intel_breadcrumbs_disable_irq(b);
		}
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out_unlock:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
	spin_unlock_irq(&b->lock);
}

static bool signal_complete(struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may already have
	 * signalled that this wait is completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return true;

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}

static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
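
/*
 * SCHED_FIFO priority 1 is the lowest realtime priority: enough to preempt
 * every normal (CFS) task in the system without competing against other,
 * more critical, realtime threads.
 */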

static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signaling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaler. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		request = READ_ONCE(b->first_signal);
		if (signal_complete(request)) {
			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			intel_engine_remove_wait(engine,
						 &request->signaling.wait);

			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			spin_lock_irq(&b->lock);
			if (request == b->first_signal) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				b->first_signal = rb ? to_signaler(rb) : NULL;
			}
			rb_erase(&request->signaling.node, &b->signals);
			spin_unlock_irq(&b->lock);

			i915_gem_request_put(request);
		} else {
			if (kthread_should_stop())
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	assert_spin_locked(&request->lock);
	if (!request->global_seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.seqno = request->global_seqno;
	i915_gem_request_get(request);

	spin_lock(&b->lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaler thread. As per usual, only the oldest
	 * waiter (not just signaler) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(request->global_seqno,
				      to_signaler(parent)->global_seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		smp_store_mb(b->first_signal, request);

	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}
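
/*
 * A rough sketch of how we get here (assuming the i915 fence ops of this
 * vintage; the entry point is dma_fence_enable_sw_signaling() taking the
 * irqsafe fence->lock before calling the ->enable_signaling() hook):
 *
 *	dma_fence_add_callback(&rq->fence, ...)
 *	  -> dma_fence_enable_sw_signaling(&rq->fence)
 *	    -> fence->ops->enable_signaling()	[i915_fence_enable_signaling]
 *	      -> intel_engine_enable_signaling(rq)
 *
 * Once the request's seqno passes, the signaler kthread above calls
 * dma_fence_signal() and the registered callbacks run.
 */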

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->lock);
	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);
	setup_timer(&b->hangcheck,
		    intel_breadcrumbs_hangcheck,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}
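
/*
 * Lifecycle note (inferred from the function names and the WARN_ONs in
 * fini below): init runs once per engine before any requests can be
 * queued, reset sanitizes state after a GPU reset, and fini runs at
 * teardown once the engine is idle and all requests have been retired.
 */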

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->lock);

	__intel_breadcrumbs_disable_irq(b);
	if (intel_engine_has_waiter(engine)) {
		b->timeout = wait_timeout();
		__intel_breadcrumbs_enable_irq(b);
		if (READ_ONCE(b->irq_posted))
			wake_up_process(b->first_wait->tsk);
	} else {
		/* sanitize the IMR and unmask any auxiliary interrupts */
		irq_disable(engine);
	}

	spin_unlock_irq(&b->lock);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->first_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(READ_ONCE(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;

	for_each_engine(engine, i915, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;

		spin_lock_irq(&b->lock);

		if (b->first_wait) {
			wake_up_process(b->first_wait->tsk);
			mask |= intel_engine_flag(engine);
		}

		if (b->first_signal) {
			wake_up_process(b->signaler);
			mask |= intel_engine_flag(engine);
		}

		spin_unlock_irq(&b->lock);
	}

	return mask;
}