/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/fence.h>

#include "i915_gem.h"

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
	struct fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_signal_node signaling;

	/** GEM sequence number associated with the previous request,
	 * when the HWS breadcrumb is equal to this, the GPU is processing
	 * this request.
	 */
	u32 previous_seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/**
	 * Position in the ringbuffer of the start of the postfix.
	 * This is required to calculate the maximum available ringbuffer
	 * space without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/** Preallocated space in the ringbuffer for emitting the request */
	u32 reserved_space;

	/**
	 * Context related to the previous request.
	 * As the contexts are accessed by the hardware until the switch is
	 * completed to a new context, the hardware may still be writing
	 * to the context object after the breadcrumb is visible. We must
	 * not unpin/unbind/prune that object whilst still active and so
	 * we keep the previous context pinned until the following (this)
	 * request is retired.
	 */
	struct i915_gem_context *previous_context;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct drm_i915_gem_object *batch_obj;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

	/** process identifier submitting this request */
	struct pid *pid;

	/**
	 * The ELSP only accepts two elements at a time, so we queue
	 * context/tail pairs on a given queue (ring->execlist_queue) until the
	 * hardware is available. The queue serves a double purpose: we also use
	 * it to keep track of the up to 2 contexts currently in the hardware
	 * (usually one in execution and the other queued up by the GPU): We
	 * only remove elements from the head of the queue when the hardware
	 * informs us that an element has been completed.
	 *
	 * All accesses to the queue are mediated by a spinlock
	 * (ring->execlist_lock).
	 */

	/** Execlist link in the submission queue. */
	struct list_head execlist_link;

	/** Execlists no. of times this request has been sent to the ELSP */
	int elsp_submitted;

	/** Execlists context hardware id. */
	unsigned int ctx_hw_id;
};

extern const struct fence_ops i915_fence_ops;

static inline bool fence_is_i915(struct fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);

static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->fence.seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
	return req ? req->engine : NULL;
}

static inline struct drm_i915_gem_request *
to_request(struct fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
	GEM_BUG_ON(fence && !fence_is_i915(fence));
	return container_of(fence, struct drm_i915_gem_request, fence);
}
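
/* Because the fence is the first member (enforced by the BUILD_BUG_ON
 * above), container_of() subtracts an offset of zero and to_request(NULL)
 * yields NULL, which is what makes NULL fence/request interoperable.
 * A minimal sketch of why this holds (illustrative, not part of this
 * header):
 *
 *	struct fence *f = NULL;
 *	struct drm_i915_gem_request *rq = to_request(f);
 *	// rq == NULL, since (char *)f - 0 is still NULL
 */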

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
	return to_request(fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
	return to_request(fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
	fence_put(&req->fence);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_get(src);

	if (*pdst)
		i915_gem_request_put(*pdst);

	*pdst = src;
}
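
/* A usage sketch for keeping a single tracked request pointer up to date
 * (the tracked pointer here is hypothetical, not a member defined in this
 * header):
 *
 *	struct drm_i915_gem_request *tracked; // e.g. a member of some object
 *
 *	i915_gem_request_assign(&tracked, new_req); // refs new_req, unrefs old
 *	i915_gem_request_assign(&tracked, NULL);    // drops the tracked ref
 */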

void __i915_add_request(struct drm_i915_gem_request *req,
			struct drm_i915_gem_object *batch_obj,
			bool flush_caches);
#define i915_add_request(req) \
	__i915_add_request(req, NULL, true)
#define i915_add_request_no_flush(req) \
	__i915_add_request(req, NULL, false)

struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

int i915_wait_request(struct drm_i915_gem_request *req,
		      bool interruptible,
		      s64 *timeout,
		      struct intel_rps_client *rps)
	__attribute__((nonnull(1)));

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
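
/* The unsigned subtraction followed by the signed comparison makes this
 * test safe across u32 wraparound. Worked examples (illustrative):
 *
 *	i915_seqno_passed(5, 3);          // (s32)2 >= 0  -> true
 *	i915_seqno_passed(3, 5);          // (s32)-2 < 0  -> false
 *	i915_seqno_passed(2, 0xfffffffe); // (s32)4 >= 0  -> true, despite wrap
 */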

static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->previous_seqno);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->fence.seqno);
}

bool __i915_spin_request(const struct drm_i915_gem_request *request,
			 int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
				     int state, unsigned long timeout_us)
{
	return (i915_gem_request_started(request) &&
		__i915_spin_request(request, state, timeout_us));
}
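
/* i915_spin_request() only busy-waits once the request has actually started
 * executing, as tested above. A sketch of the opportunistic spin-then-sleep
 * pattern it serves (illustrative; the 5us budget is an assumption, not a
 * value from this header):
 *
 *	if (!i915_spin_request(req, TASK_INTERRUPTIBLE, 5))
 *		err = i915_wait_request(req, true, NULL, NULL);
 */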
/* We treat requests as fences. This is not to be confused with our
 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU; for example, we should not rewrite an object's PTE whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct drm_i915_gem_request *);

struct i915_gem_active {
	struct drm_i915_gem_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct drm_i915_gem_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *	     can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, when it is retired
 * after completion, the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}
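
/* A minimal lifecycle sketch, assuming a hypothetical object that embeds
 * a tracker (the names obj, last_write and the mutex are illustrative,
 * not taken from this header):
 *
 *	struct hypothetical_obj {
 *		struct i915_gem_active last_write;
 *	};
 *
 *	init_request_active(&obj->last_write, NULL); // NULL -> i915_gem_retire_noop
 *	i915_gem_active_set(&obj->last_write, request); // see below
 *	...
 *	err = i915_gem_active_retire(&obj->last_write, &dev->struct_mutex);
 */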

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}

static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/* Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;

	request = rcu_dereference_protected(active->request,
					    lockdep_is_held(mutex));
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_peek_rcu - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek_rcu() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, and inspection of the request is only valid under
 * the RCU lock.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek_rcu(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = rcu_dereference(active->request);
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request,
 * or NULL if the active tracker is idle. The caller must hold the RCU read
 * lock, but the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/* Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 *	Thread A			Thread B
	 *
	 *	req = active.request
	 *					retire(req) -> free(req);
	 *					(req is now first on the slab freelist)
	 *					active.request = NULL
	 *
	 *					req = new submission on a new object
	 *	ref(req)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 */
	do {
		struct drm_i915_gem_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_gem_request_completed(request))
			return NULL;

		request = i915_gem_request_get_rcu(request);

		/* What stops the following rcu_access_pointer() from occurring
		 * before the above i915_gem_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference count
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_gem_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}
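
/* A sketch of lockless usage (illustrative; obj->last_write stands in for
 * any embedded tracker, as in the example above):
 *
 *	struct drm_i915_gem_request *request;
 *
 *	request = i915_gem_active_get_unlocked(&obj->last_write);
 *	if (request) {
 *		// the reference keeps the request valid without struct_mutex
 *		i915_gem_request_put(request);
 *	}
 */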

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_is_idle - report whether the active tracker is idle
 * @active - the active tracker
 *
 * i915_gem_active_is_idle() returns true if the active tracker is currently
 * unassigned or if the request is complete (but not yet retired). Requires
 * the caller to hold struct_mutex (but that can be relaxed if desired).
 */
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active,
			struct mutex *mutex)
{
	return !i915_gem_active_peek(active, mutex);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning. Note that it does not guarantee that the request is
 * retired first, see i915_gem_active_retire().
 *
 * i915_gem_active_wait() returns immediately if the active
 * request is already complete.
 */
static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_peek(active, mutex);
	if (!request)
		return 0;

	return i915_wait_request(request, true, NULL, NULL);
}

/**
 * i915_gem_active_wait_unlocked - waits until the request is completed
 * @active - the active request on which to wait
 * @interruptible - whether the wait can be woken by a userspace signal
 * @timeout - how long to wait at most
 * @rps - userspace client to charge for a waitboost
 *
 * i915_gem_active_wait_unlocked() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
			      bool interruptible,
			      s64 *timeout,
			      struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *request;
	int ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_wait_request(request, interruptible, timeout, rps);
		i915_gem_request_put(request);
	}

	return ret;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct drm_i915_gem_request *request;
	int ret;

	request = rcu_dereference_protected(active->request,
					    lockdep_is_held(mutex));
	if (!request)
		return 0;

	ret = i915_wait_request(request, true, NULL, NULL);
	if (ret)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

/* Convenience functions for peeking at state inside active's request whilst
 * guarded by the struct_mutex.
 */

static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active,
			  struct mutex *mutex)
{
	return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}

static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active,
			   struct mutex *mutex)
{
	return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
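
/* for_each_active() iterates @idx over each bit set in @mask, clearing each
 * bit as it goes (note that @mask is consumed). A sketch, assuming a
 * hypothetical per-engine array of trackers (illustrative only):
 *
 *	unsigned int mask = obj->active;
 *	int idx;
 *
 *	for_each_active(mask, idx) {
 *		err = i915_gem_active_retire(&obj->last_read[idx], mutex);
 *		if (err)
 *			return err;
 *	}
 */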

#endif /* I915_GEM_REQUEST_H */