/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>

#include "i915_drv.h"

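/*
 * dma_fence interface for i915 GEM requests: each request embeds a
 * struct dma_fence (see dma_fence_init() in i915_gem_request_alloc()),
 * and these callbacks let the generic fence code query, wait upon and
 * release the backing request.
 */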
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

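/*
 * Track a request on the submitting client's list so that the requests
 * outstanding for that file can be walked later (and dropped again in
 * i915_gem_request_remove_from_client()).
 */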
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
			       struct i915_priotree *signal,
			       struct i915_dependency *dep,
			       unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &pt->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
			     struct i915_priotree *pt,
			     struct i915_priotree *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
	return 0;
}

static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
	struct i915_dependency *dep, *next;

	GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

	/* Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
	INIT_LIST_HEAD(&pt->signalers_list);
	INIT_LIST_HEAD(&pt->waiters_list);
	RB_CLEAR_NODE(&pt->node);
	pt->priority = INT_MIN;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_done(&request->submit));
	GEM_BUG_ON(!i915_sw_fence_done(&request->execute));
	GEM_BUG_ON(!i915_gem_request_completed(request));
	GEM_BUG_ON(!request->i915->gt.active_requests);

	trace_i915_gem_request_retire(request);

	spin_lock_irq(&request->engine->timeline->lock);
	list_del_init(&request->link);
	spin_unlock_irq(&request->engine->timeline->lock);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;
	if (!--request->i915->gt.active_requests) {
		GEM_BUG_ON(!request->i915->gt.awake);
		mod_delayed_work(request->i915->wq,
				 &request->i915->gt.idle_work,
				 msecs_to_jiffies(100));
	}

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	/* Retirement decays the ban score as it is a sign of ctx progress */
	if (request->ctx->ban_score > 0)
		request->ctx->ban_score--;

	i915_gem_context_put(request->ctx);

	dma_fence_signal(&request->fence);

	i915_priotree_fini(request->i915, &request->priotree);
	i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (list_empty(&req->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline->requests,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;

	if (i915_terminally_wedged(error))
		return -EIO;

	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!dev_priv->mm.interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests > 1);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
		while (intel_breadcrumbs_busy(i915))
			cond_resched(); /* spin until threads are complete */
	}
	atomic_set(&timeline->next_seqno, seqno);

	/* Finally reset hw state */
	for_each_engine(engine, i915, id)
		intel_engine_init_global_seqno(engine, seqno);

	list_for_each_entry(timeline, &i915->gt.timelines, link) {
		for_each_engine(engine, i915, id) {
			struct intel_timeline *tl = &timeline->engine[id];

			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
		}
	}

	return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}

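/*
 * Reserve room in the global seqno space for one more request. If adding
 * it could wrap the 32bit seqno, idle the GPU and restart the global
 * timeline from zero via i915_gem_init_global_seqno() before proceeding.
 */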
static int reserve_global_seqno(struct drm_i915_private *i915)
{
	u32 active_requests = ++i915->gt.active_requests;
	u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
	int ret;

	/* Reservation is fine until we need to wrap around */
	if (likely(next_seqno + active_requests > next_seqno))
		return 0;

	ret = i915_gem_init_global_seqno(i915, 0);
	if (ret) {
		i915->gt.active_requests--;
		return ret;
	}

	return 0;
}

static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
	/* next_seqno only incremented under a mutex */
	return ++tl->next_seqno.counter;
}

static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
	return atomic_inc_return(&tl->next_seqno);
}

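/*
 * Move the request from its per-context timeline onto the engine's
 * global timeline and assign the global seqno it will signal on the
 * hardware. Callers must hold the engine timeline lock; see
 * i915_gem_request_submit() for the irq-safe wrapper.
 */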
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;
	u32 seqno;

	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);
	assert_spin_locked(&timeline->lock);

	seqno = timeline_get_seqno(timeline->common);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
	request->previous_seqno = timeline->last_submitted_seqno;
	timeline->last_submitted_seqno = seqno;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request);
	spin_unlock(&request->lock);

	GEM_BUG_ON(!request->global_seqno);
	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);

	i915_sw_fence_commit(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

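/* Submit-fence callback: once the fence is ready, hand the request to the engine backend. */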
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	if (state == FENCE_COMPLETE) {
		struct drm_i915_gem_request *request =
			container_of(fence, typeof(*request), submit);

		request->engine->submit_request(request);
	}

	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	ret = reserve_global_seqno(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->timeline->requests,
				       typeof(*req), link);
	if (req && __i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_unreserve;
	}

	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
	GEM_BUG_ON(req->timeline == engine->timeline);

	spin_lock_init(&req->lock);
	dma_fence_init(&req->fence,
		       &i915_fence_ops,
		       &req->lock,
		       req->timeline->fence_context,
		       __timeline_get_seqno(req->timeline->common));

	i915_sw_fence_init(&req->submit, submit_notify);
	i915_sw_fence_init(&req->execute, execute_notify);
	/* Ensure that the execute fence completes after the submit fence -
	 * as we complete the execute fence from within the submit fence
	 * callback, its completion would otherwise be visible first.
	 */
	i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);

	i915_priotree_init(&req->priotree);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
	req->previous_context = NULL;
	req->file_priv = NULL;
	req->batch = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
	kmem_cache_free(dev_priv->requests, req);
err_unreserve:
	dev_priv->gt.active_requests--;
	return ERR_PTR(ret);
}

static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);

	if (to->engine->schedule) {
		ret = i915_priotree_add_dependency(to->i915,
						   &to->priotree,
						   &from->priotree);
		if (ret < 0)
			return ret;
	}

	if (to->timeline == from->timeline)
		return 0;

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (!from->global_seqno) {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
		return 0;

	trace_i915_gem_ring_sync_to(to, from);
	if (!i915.semaphores) {
		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
			ret = i915_sw_fence_await_dma_fence(&to->submit,
							    &from->fence, 0,
							    GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;
	}

	to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
	return 0;
}

int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				 struct dma_fence *fence)
{
	struct dma_fence_array *array;
	int ret;
	int i;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return 0;

	if (dma_fence_is_i915(fence))
		return i915_gem_request_await_request(req, to_request(fence));

	if (!dma_fence_is_array(fence)) {
		ret = i915_sw_fence_await_dma_fence(&req->submit,
						    fence, I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	/* Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		struct dma_fence *child = array->fences[i];

		if (dma_fence_is_i915(child))
			ret = i915_gem_request_await_request(req,
							     to_request(child));
		else
			ret = i915_sw_fence_await_dma_fence(&req->submit,
							    child, I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_gem_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_gem_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

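/*
 * First request after idling: take a runtime-pm wakeref, mark the GT as
 * awake, re-enable powersave/RPS and schedule the retire worker.
 */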
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (dev_priv->gt.awake)
		return;

	GEM_BUG_ON(!dev_priv->gt.active_requests);

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	struct intel_timeline *timeline = request->timeline;
	struct drm_i915_gem_request *prev;
	int err;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_gem_request_add(request);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		err = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(err, "engine->emit_flush() failed: %d!\n", err);
	}

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(err);
	request->postfix = ring->tail;
	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_priotree_add_dependency(&request->priotree,
						       &prev->priotree,
						       &request->dep,
						       0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
				     request->fence.seqno));

	timeline->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	request->emitted_jiffies = jiffies;

	i915_gem_mark_busy(engine);

	/* Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (engine->schedule)
		engine->schedule(request, request->ctx->priority);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

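/* Re-add @wait to @q if a wake-up (e.g. across a GPU reset) removed it. */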
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (__i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}

static long
__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
				unsigned int flags,
				long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
	DEFINE_WAIT(reset);
	DEFINE_WAIT(wait);

	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(q, &reset);

	do {
		prepare_to_wait(&request->execute.wait, &wait, state);

		if (i915_sw_fence_done(&request->execute))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&request->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(request->i915);
			reset_wait_queue(q, &reset);
			continue;
		}

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	} while (timeout);
	finish_wait(&request->execute.wait, &wait);

	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(q, &reset);

	return timeout;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_gem_request_completed(req))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_gem_request_wait_begin(req);

	if (!i915_sw_fence_done(&req->execute)) {
		timeout = __i915_request_wait_for_execute(req, flags, timeout);
		if (timeout < 0)
			goto complete;

		GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
	}
	GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
	GEM_BUG_ON(!req->global_seqno);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->global_seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&req->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(req->i915);
			reset_wait_queue(&req->i915->gpu_error.wait_queue,
					 &reset);
			continue;
		}

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}

	intel_engine_remove_wait(req->engine, &wait);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
	__set_current_state(TASK_RUNNING);

complete:
	trace_i915_gem_request_wait_end(req);

	return timeout;
}

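/*
 * Retire completed requests on this engine's timeline in submission
 * order, stopping at the first request still busy on the GPU.
 */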
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &engine->timeline->requests, link) {
		if (!__i915_gem_request_completed(request))
			return;

		i915_gem_request_retire(request);
	}
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!dev_priv->gt.active_requests)
		return;

	for_each_engine(engine, dev_priv, id)
		engine_retire_requests(engine);
}