/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>

#include "i915_drv.h"

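/*
 * The callbacks below implement struct dma_fence_ops for i915 GEM requests,
 * allowing a request to be handled as an ordinary dma_fence by the rest of
 * the kernel (e.g. via reservation objects or sync_file) and signalled or
 * waited upon through the common fence interface.
 */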
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
        return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
        return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
        return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
        if (i915_fence_signaled(fence))
                return false;

        intel_engine_enable_signaling(to_request(fence));
        return true;
}

static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
                                   signed long timeout)
{
        return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
        struct drm_i915_gem_request *req = to_request(fence);

        /* The request is put onto an RCU freelist (i.e. the address
         * is immediately reused), so mark the fences as being freed now.
         * Otherwise the debugobjects for the fences are only marked as
         * freed when the slab cache itself is freed, and so we would get
         * caught trying to reuse dead objects.
         */
        i915_sw_fence_fini(&req->submit);

        kmem_cache_free(req->i915->requests, req);
}

const struct dma_fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
};

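/*
 * Associate a request with the client (struct drm_file) that submitted it:
 * the request is added to the per-file request list under file_priv->mm.lock
 * so that it can later be looked up, and removed, on a per-client basis.
 */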
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
{
        struct drm_i915_private *dev_private;
        struct drm_i915_file_private *file_priv;

        WARN_ON(!req || !file || req->file_priv);

        if (!req || !file)
                return -EINVAL;

        if (req->file_priv)
                return -EINVAL;

        dev_private = req->i915;
        file_priv = file->driver_priv;

        spin_lock(&file_priv->mm.lock);
        req->file_priv = file_priv;
        list_add_tail(&req->client_list, &file_priv->mm.request_list);
        spin_unlock(&file_priv->mm.lock);

        return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}

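/*
 * The helpers below maintain the scheduler dependency tree (i915_priotree):
 * each request records the requests it must wait upon (signalers) and the
 * requests waiting upon it (waiters), so that an engine's ->schedule()
 * backend can walk the graph when reprioritising requests.
 */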
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
        return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
                     struct i915_dependency *dep)
{
        kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
                               struct i915_priotree *signal,
                               struct i915_dependency *dep,
                               unsigned long flags)
{
        INIT_LIST_HEAD(&dep->dfs_link);
        list_add(&dep->wait_link, &signal->waiters_list);
        list_add(&dep->signal_link, &pt->signalers_list);
        dep->signaler = signal;
        dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
                             struct i915_priotree *pt,
                             struct i915_priotree *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc(i915);
        if (!dep)
                return -ENOMEM;

        __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
        return 0;
}

static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
        struct i915_dependency *dep, *next;

        GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

        /* Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
        INIT_LIST_HEAD(&pt->signalers_list);
        INIT_LIST_HEAD(&pt->waiters_list);
        RB_CLEAR_NODE(&pt->node);
        pt->priority = INT_MIN;
}

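/*
 * Rewind the global seqno of every engine to @seqno: idle the GPU and retire
 * all outstanding requests, wait for the breadcrumb threads to drain if the
 * new value would appear to go backwards, then reset the per-engine timeline
 * state and clear all cached inter-engine sync points.
 */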
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
        struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int ret;

        /* Carefully retire all requests without writing to the rings */
        ret = i915_gem_wait_for_idle(i915,
                                     I915_WAIT_INTERRUPTIBLE |
                                     I915_WAIT_LOCKED);
        if (ret)
                return ret;

        i915_gem_retire_requests(i915);
        GEM_BUG_ON(i915->gt.active_requests > 1);

        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
        for_each_engine(engine, i915, id) {
                struct intel_timeline *tl = &timeline->engine[id];

                if (!i915_seqno_passed(seqno, tl->seqno)) {
                        /* spin until threads are complete */
                        while (intel_breadcrumbs_busy(engine))
                                cond_resched();
                }

                /* Finally reset hw state */
                tl->seqno = seqno;
                intel_engine_init_global_seqno(engine, seqno);
        }

        list_for_each_entry(timeline, &i915->gt.timelines, link) {
                for_each_engine(engine, i915, id) {
                        struct intel_timeline *tl = &timeline->engine[id];

                        memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
                }
        }

        return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (seqno == 0)
                return -EINVAL;

        /* The HWS page needs to be set to one less than the value we
         * will inject into the ring.
         */
        return reset_all_global_seqno(dev_priv, seqno - 1);
}

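/*
 * Account for another in-flight seqno on this engine. If adding it would
 * overflow the 32bit seqno space, reset all global seqno back to zero first;
 * should that reset fail, the reservation is unwound before returning.
 */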
static int reserve_seqno(struct intel_engine_cs *engine)
{
        u32 active = ++engine->timeline->inflight_seqnos;
        u32 seqno = engine->timeline->seqno;
        int ret;

        /* Reservation is fine until we need to wrap around */
        if (likely(!add_overflows(seqno, active)))
                return 0;

        ret = reset_all_global_seqno(engine->i915, 0);
        if (ret) {
                engine->timeline->inflight_seqnos--;
                return ret;
        }

        return 0;
}

static void unreserve_seqno(struct intel_engine_cs *engine)
{
        GEM_BUG_ON(!engine->timeline->inflight_seqnos);
        engine->timeline->inflight_seqnos--;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
                          struct drm_i915_gem_request *request)
{
        /* Space left intentionally blank */
}

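/*
 * Retire a single completed request: unlink it from the engine timeline and
 * the ring, run the ->retire() callback of every i915_gem_active tracking it,
 * drop its client association, unpin the previously active context, and
 * finally signal its fence and release the retirement reference.
 */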
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct i915_gem_active *active, *next;

        lockdep_assert_held(&request->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
        GEM_BUG_ON(!i915_gem_request_completed(request));
        GEM_BUG_ON(!request->i915->gt.active_requests);

        trace_i915_gem_request_retire(request);

        spin_lock_irq(&engine->timeline->lock);
        list_del_init(&request->link);
        spin_unlock_irq(&engine->timeline->lock);

        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of the tail of the request to update the last known position
         * of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        list_del(&request->ring_link);
        request->ring->last_retired_head = request->postfix;
        if (!--request->i915->gt.active_requests) {
                GEM_BUG_ON(!request->i915->gt.awake);
                mod_delayed_work(request->i915->wq,
                                 &request->i915->gt.idle_work,
                                 msecs_to_jiffies(100));
        }
        unreserve_seqno(request->engine);

        /* Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
         * when their *last* active request is completed (updating state
         * tracking lists for eviction, active references for GEM, etc).
         *
         * As the ->retire() may free the node, we decouple it first and
         * pass along the auxiliary information (to avoid dereferencing
         * the node after the callback).
         */
        list_for_each_entry_safe(active, next, &request->active_list, link) {
                /* In microbenchmarks, or when focusing upon time spent inside
                 * the kernel, we may spend an inordinate amount of time simply
                 * handling the retirement of requests and processing their
                 * callbacks. This loop itself is particularly hot due to the
                 * cache misses when jumping around the list of i915_gem_active.
                 * So we try to keep this loop as streamlined as possible and
                 * also prefetch the next i915_gem_active to try and hide
                 * the likely cache miss.
                 */
                prefetchw(next);

                INIT_LIST_HEAD(&active->link);
                RCU_INIT_POINTER(active->request, NULL);

                active->retire(active, request);
        }

        i915_gem_request_remove_from_client(request);

        /* Retirement decays the ban score as it is a sign of ctx progress */
        if (request->ctx->ban_score > 0)
                request->ctx->ban_score--;

        /* The backing object for the context is done after switching to the
         * *next* context. Therefore we cannot retire the previous context until
         * the next context has already started running. However, since we
         * cannot take the required locks at i915_gem_request_submit() we
         * defer the unpinning of the active context to now, retirement of
         * the subsequent request.
         */
        if (engine->last_retired_context)
                engine->context_unpin(engine, engine->last_retired_context);
        engine->last_retired_context = request->ctx;

        dma_fence_signal(&request->fence);

        i915_priotree_fini(request->i915, &request->priotree);
        i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;

        lockdep_assert_held(&req->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_request_completed(req));

        if (list_empty(&req->link))
                return;

        do {
                tmp = list_first_entry(&engine->timeline->requests,
                                       typeof(*tmp), link);

                i915_gem_request_retire(tmp);
        } while (tmp != req);
}

static u32 timeline_get_seqno(struct intel_timeline *tl)
{
        return ++tl->seqno;
}

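/*
 * __i915_gem_request_submit - move a request onto the hardware timeline.
 *
 * Called with the engine timeline lock held: assigns the request its global
 * seqno, enables signaling if a waiter has already requested it, writes the
 * breadcrumb into the ring, and moves the request from its per-context
 * timeline onto the engine's global timeline.
 */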
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_timeline *timeline;
        u32 seqno;

        trace_i915_gem_request_execute(request);

        /* Transfer from per-context onto the global per-engine timeline */
        timeline = engine->timeline;
        GEM_BUG_ON(timeline == request->timeline);
        assert_spin_locked(&timeline->lock);

        seqno = timeline_get_seqno(timeline);
        GEM_BUG_ON(!seqno);
        GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
        request->global_seqno = seqno;
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                intel_engine_enable_signaling(request);
        spin_unlock(&request->lock);

        engine->emit_breadcrumb(request,
                                request->ring->vaddr + request->postfix);

        spin_lock(&request->timeline->lock);
        list_move_tail(&request->link, &timeline->requests);
        spin_unlock(&request->timeline->lock);

        wake_up_all(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);

        __i915_gem_request_submit(request);

        spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

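/*
 * __i915_gem_request_unsubmit - take a request back off the hardware timeline.
 *
 * The inverse of __i915_gem_request_submit(): rewinds the engine seqno,
 * clears the request's global seqno, cancels any signaling and moves the
 * request back onto its per-context timeline so that it can later be
 * resubmitted (for example, after a GPU reset).
 */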
void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_timeline *timeline;

        assert_spin_locked(&engine->timeline->lock);

        /* Only unwind in reverse order, required so that the per-context list
         * is kept in seqno/ring order.
         */
        GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
        engine->timeline->seqno--;

        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
        request->global_seqno = 0;
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                intel_engine_cancel_signaling(request);
        spin_unlock(&request->lock);

        /* Transfer back from the global per-engine timeline to per-context */
        timeline = request->timeline;
        GEM_BUG_ON(timeline == engine->timeline);

        spin_lock(&timeline->lock);
        list_move(&request->link, &timeline->requests);
        spin_unlock(&timeline->lock);

        /* We don't need to wake_up any waiters on request->execute, they
         * will get woken by any other event or us re-adding this request
         * to the engine timeline (__i915_gem_request_submit()). The waiters
         * should be quite adept at finding that the request now has a new
         * global_seqno compared to the one they went to sleep on.
         */
}

void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);

        __i915_gem_request_unsubmit(request);

        spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

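/*
 * i915_sw_fence notify callback for the request's submit fence: once all of
 * the fences the request depends upon have completed, the request is handed
 * to the engine backend for execution; when the sw fence itself is freed,
 * the reference held by the fence chain on the request is dropped.
 */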
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), submit);

        switch (state) {
        case FENCE_COMPLETE:
                trace_i915_gem_request_submit(request);
                request->engine->submit_request(request);
                break;

        case FENCE_FREE:
                i915_gem_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_gem_request *req;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged.
         */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return ERR_PTR(-EIO);

        /* Pinning the contexts may generate requests in order to acquire
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
        ret = engine->context_pin(engine, ctx);
        if (ret)
                return ERR_PTR(ret);

        ret = reserve_seqno(engine);
        if (ret)
                goto err_unpin;

        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
        if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);

        /* Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
         * That is, the request we are writing to here may be in the process
         * of being read by __i915_gem_active_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
         * read the request->global_seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
         * with dma_fence_init(). This increment is safe for release as we
         * check that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
         * pointer. We must not call kmem_cache_zalloc() or else we set
         * that pointer to NULL and cause a crash during the lookup. If
         * we see the request is completed (based on the value of the
         * old engine and seqno), the lookup is complete and reports NULL.
         * If we decide the request is not completed (new engine or seqno),
         * then we grab a reference and double check that it is still the
         * active request - which it won't be - and restart the lookup.
         *
         * Do not use kmem_cache_zalloc() here!
         */
        req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto err_unreserve;
        }

        req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
        GEM_BUG_ON(req->timeline == engine->timeline);

        spin_lock_init(&req->lock);
        dma_fence_init(&req->fence,
                       &i915_fence_ops,
                       &req->lock,
                       req->timeline->fence_context,
                       timeline_get_seqno(req->timeline));

        /* We bump the ref for the fence chain */
        i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
        init_waitqueue_head(&req->execute);

        i915_priotree_init(&req->priotree);

        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = ctx;

        /* No zalloc, must clear what we need by hand */
        req->global_seqno = 0;
        req->file_priv = NULL;
        req->batch = NULL;

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_add_request() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
        GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

        ret = engine->request_alloc(req);
        if (ret)
                goto err_ctx;

        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        req->head = req->ring->tail;

        /* Check that we didn't interrupt ourselves with a new request */
        GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
        return req;

err_ctx:
        /* Make sure we didn't add ourselves to external state before freeing */
        GEM_BUG_ON(!list_empty(&req->active_list));
        GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
        GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

        kmem_cache_free(dev_priv->requests, req);
err_unreserve:
        unreserve_seqno(engine);
err_unpin:
        engine->context_unpin(engine, ctx);
        return ERR_PTR(ret);
}

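/*
 * Order request @to after request @from. Depending on where @from is in its
 * lifecycle, this is expressed as a scheduler dependency, a submit-fence wait
 * on the same engine, a dma-fence wait, a brief busy-spin, or a hardware
 * semaphore; the result is cached in the timeline's sync_seqno[] so the same
 * inter-engine wait is not emitted twice.
 */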
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
                               struct drm_i915_gem_request *from)
{
        u32 seqno;
        int ret;

        GEM_BUG_ON(to == from);

        if (to->engine->schedule) {
                ret = i915_priotree_add_dependency(to->i915,
                                                   &to->priotree,
                                                   &from->priotree);
                if (ret < 0)
                        return ret;
        }

        if (to->timeline == from->timeline)
                return 0;

        if (to->engine == from->engine) {
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        seqno = i915_gem_request_global_seqno(from);
        if (!seqno) {
                ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                    &from->fence, 0,
                                                    GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        if (seqno <= to->timeline->sync_seqno[from->engine->id])
                return 0;

        trace_i915_gem_ring_sync_to(to, from);
        if (!i915.semaphores) {
                if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
                        ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                            &from->fence, 0,
                                                            GFP_KERNEL);
                        if (ret < 0)
                                return ret;
                }
        } else {
                ret = to->engine->semaphore.sync_to(to, from);
                if (ret)
                        return ret;
        }

        to->timeline->sync_seqno[from->engine->id] = seqno;
        return 0;
}

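/*
 * Queue an asynchronous wait on an arbitrary dma-fence before @req executes.
 * Native i915 fences are routed through i915_gem_request_await_request(),
 * fence arrays are decomposed into their component fences, and anything else
 * is waited upon via a sw-fence with the driver's fence timeout.
 */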
int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
                                 struct dma_fence *fence)
{
        struct dma_fence_array *array;
        int ret;
        int i;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return 0;

        if (dma_fence_is_i915(fence))
                return i915_gem_request_await_request(req, to_request(fence));

        if (!dma_fence_is_array(fence)) {
                ret = i915_sw_fence_await_dma_fence(&req->submit,
                                                    fence, I915_FENCE_TIMEOUT,
                                                    GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        /* Note that if the fence-array was created in signal-on-any mode,
         * we should *not* decompose it into its individual fences. However,
         * we don't currently store which mode the fence-array is operating
         * in. Fortunately, the only user of signal-on-any is private to
         * amdgpu and we should not see any incoming fence-array from
         * sync-file being in signal-on-any mode.
         */

        array = to_dma_fence_array(fence);
        for (i = 0; i < array->num_fences; i++) {
                struct dma_fence *child = array->fences[i];

                if (dma_fence_is_i915(child))
                        ret = i915_gem_request_await_request(req,
                                                             to_request(child));
                else
                        ret = i915_sw_fence_await_dma_fence(&req->submit,
                                                            child, I915_FENCE_TIMEOUT,
                                                            GFP_KERNEL);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the caller intends to write into the object
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (@write is set), the new request must
 *   wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write)
{
        struct dma_fence *excl;
        int ret = 0;

        if (write) {
                struct dma_fence **shared;
                unsigned int count, i;

                ret = reservation_object_get_fences_rcu(obj->resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        ret = i915_gem_request_await_dma_fence(to, shared[i]);
                        if (ret)
                                break;

                        dma_fence_put(shared[i]);
                }

                for (; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(obj->resv);
        }

        if (excl) {
                if (ret == 0)
                        ret = i915_gem_request_await_dma_fence(to, excl);

                dma_fence_put(excl);
        }

        return ret;
}

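/*
 * Mark the GT as busy on behalf of the first new request: take a runtime-pm
 * reference without forcing a resume, enable GT powersaving and RPS tuning,
 * and queue the periodic retire worker.
 */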
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        if (dev_priv->gt.awake)
                return;

        GEM_BUG_ON(!dev_priv->gt.active_requests);

        intel_runtime_pm_get_noresume(dev_priv);
        dev_priv->gt.awake = true;

        intel_enable_gt_powersave(dev_priv);
        i915_update_gfx_val(dev_priv);
        if (INTEL_GEN(dev_priv) >= 6)
                gen6_rps_busy(dev_priv);

        queue_delayed_work(dev_priv->wq,
                           &dev_priv->gt.retire_work,
                           round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
        struct intel_timeline *timeline = request->timeline;
        struct drm_i915_gem_request *prev;
        u32 *cs;
        int err;

        lockdep_assert_held(&request->i915->drm.struct_mutex);
        trace_i915_gem_request_add(request);

        /* Make sure that no request gazumped us - if it was allocated after
         * our i915_gem_request_alloc() and called __i915_add_request() before
         * us, the timeline will hold its seqno which is later than ours.
         */
        GEM_BUG_ON(timeline->seqno != request->fence.seqno);

        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
        request->reserved_space = 0;

        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * things up similar to emitting the lazy request. The difference here
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
        if (flush_caches) {
                err = engine->emit_flush(request, EMIT_FLUSH);

                /* Not allowed to fail! */
                WARN(err, "engine->emit_flush() failed: %d!\n", err);
        }

        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
        cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
        GEM_BUG_ON(IS_ERR(cs));
        request->postfix = intel_ring_offset(request, cs);

        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */

        prev = i915_gem_active_raw(&timeline->last_request,
                                   &request->i915->drm.struct_mutex);
        if (prev) {
                i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
                                             &request->submitq);
                if (engine->schedule)
                        __i915_priotree_add_dependency(&request->priotree,
                                                       &prev->priotree,
                                                       &request->dep,
                                                       0);
        }

        spin_lock_irq(&timeline->lock);
        list_add_tail(&request->link, &timeline->requests);
        spin_unlock_irq(&timeline->lock);

        GEM_BUG_ON(timeline->seqno != request->fence.seqno);
        i915_gem_active_set(&timeline->last_request, request);

        list_add_tail(&request->ring_link, &ring->request_list);
        request->emitted_jiffies = jiffies;

        if (!request->i915->gt.active_requests++)
                i915_gem_mark_busy(engine);

        /* Let the backend know a new request has arrived that may need
         * to adjust the existing execution schedule due to a high priority
         * request - i.e. we may want to preempt the current request in order
         * to run a high priority dependency chain *before* we can execute this
         * request.
         *
         * This is called before the request is ready to run so that we can
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
        if (engine->schedule)
                engine->schedule(request, request->ctx->priority);

        local_bh_disable();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

| 948 | static unsigned long local_clock_us(unsigned int *cpu) |
| 949 | { |
| 950 | unsigned long t; |
| 951 | |
| 952 | /* Cheaply and approximately convert from nanoseconds to microseconds. |
| 953 | * The result and subsequent calculations are also defined in the same |
| 954 | * approximate microseconds units. The principal source of timing |
| 955 | * error here is from the simple truncation. |
| 956 | * |
| 957 | * Note that local_clock() is only defined wrt to the current CPU; |
| 958 | * the comparisons are no longer valid if we switch CPUs. Instead of |
| 959 | * blocking preemption for the entire busywait, we can detect the CPU |
| 960 | * switch and use that as indicator of system load and a reason to |
| 961 | * stop busywaiting, see busywait_stop(). |
| 962 | */ |
| 963 | *cpu = get_cpu(); |
| 964 | t = local_clock() >> 10; |
| 965 | put_cpu(); |
| 966 | |
| 967 | return t; |
| 968 | } |
| 969 | |
| 970 | static bool busywait_stop(unsigned long timeout, unsigned int cpu) |
| 971 | { |
| 972 | unsigned int this_cpu; |
| 973 | |
| 974 | if (time_after(local_clock_us(&this_cpu), timeout)) |
| 975 | return true; |
| 976 | |
| 977 | return this_cpu != cpu; |
| 978 | } |
| 979 | |
| 980 | bool __i915_spin_request(const struct drm_i915_gem_request *req, |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 981 | u32 seqno, int state, unsigned long timeout_us) |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 982 | { |
Chris Wilson | c33ed06 | 2017-02-17 15:13:01 +0000 | [diff] [blame] | 983 | struct intel_engine_cs *engine = req->engine; |
| 984 | unsigned int irq, cpu; |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 985 | |
| 986 | /* When waiting for high frequency requests, e.g. during synchronous |
| 987 | * rendering split between the CPU and GPU, the finite amount of time |
| 988 | * required to set up the irq and wait upon it limits the response |
| 989 | * rate. By busywaiting on the request completion for a short while we |
| 990 | * can service the high frequency waits as quick as possible. However, |
| 991 | * if it is a slow request, we want to sleep as quickly as possible. |
| 992 | * The tradeoff between waiting and sleeping is roughly the time it |
| 993 | * takes to sleep on a request, on the order of a microsecond. |
| 994 | */ |
| 995 | |
Chris Wilson | c33ed06 | 2017-02-17 15:13:01 +0000 | [diff] [blame] | 996 | irq = atomic_read(&engine->irq_count); |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 997 | timeout_us += local_clock_us(&cpu); |
| 998 | do { |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 999 | if (seqno != i915_gem_request_global_seqno(req)) |
| 1000 | break; |
| 1001 | |
| 1002 | if (i915_seqno_passed(intel_engine_get_seqno(req->engine), |
| 1003 | seqno)) |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1004 | return true; |
| 1005 | |
Chris Wilson | c33ed06 | 2017-02-17 15:13:01 +0000 | [diff] [blame] | 1006 | /* Seqnos are meant to be ordered *before* the interrupt. If |
| 1007 | * we see an interrupt without a corresponding seqno advance, |
| 1008 | * assume we won't see one in the near future but require |
| 1009 | * the engine->seqno_barrier() to fix up coherency. |
| 1010 | */ |
| 1011 | if (atomic_read(&engine->irq_count) != irq) |
| 1012 | break; |
| 1013 | |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1014 | if (signal_pending_state(state, current)) |
| 1015 | break; |
| 1016 | |
| 1017 | if (busywait_stop(timeout_us, cpu)) |
| 1018 | break; |
| 1019 | |
Christian Borntraeger | f2f09a4 | 2016-10-25 11:03:14 +0200 | [diff] [blame] | 1020 | cpu_relax(); |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1021 | } while (!need_resched()); |
| 1022 | |
| 1023 | return false; |
| 1024 | } |
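/*
 * Hypothetical usage sketch (example_sync_wait() is invented for
 * illustration): busywait for a couple of microseconds in the hope that
 * the request is just about to complete, and only fall back to the full
 * interrupt-driven wait if the spin fails - the same pattern
 * i915_wait_request() uses below.
 */
static long example_sync_wait(struct drm_i915_gem_request *req)
{
	if (i915_spin_request(req, TASK_UNINTERRUPTIBLE, 2))
		return 0;	/* completed during the busywait */

	return i915_wait_request(req, 0, MAX_SCHEDULE_TIMEOUT);
}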
| 1025 | |
Chris Wilson | e070511 | 2017-02-23 07:44:20 +0000 | [diff] [blame] | 1026 | static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request) |
| 1027 | { |
| 1028 | if (likely(!i915_reset_in_progress(&request->i915->gpu_error))) |
| 1029 | return false; |
| 1030 | |
| 1031 | __set_current_state(TASK_RUNNING); |
| 1032 | i915_reset(request->i915); |
| 1033 | return true; |
| 1034 | } |
| 1035 | |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1036 | /** |
Chris Wilson | 776f323 | 2016-08-04 07:52:40 +0100 | [diff] [blame] | 1037 | * i915_wait_request - wait until execution of request has finished |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1038 | * @req: the request to wait upon |
Chris Wilson | ea746f3 | 2016-09-09 14:11:49 +0100 | [diff] [blame] | 1039 | * @flags: how to wait |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1040 | * @timeout: how long to wait in jiffies |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1041 | * |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1042 | * i915_wait_request() waits for the request to be completed, for a |
| 1043 | * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an |
| 1044 | * unbounded wait). |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1045 | * |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1046 | * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED |
| 1047 | * in via the flags; conversely, if the struct_mutex is not held, the caller |
| 1048 | * must not specify that the wait is locked. |
| 1049 | * |
| 1050 | * Returns the remaining time (in jiffies) if the request completed, which may |
| 1051 | * be zero, or -ETIME if the request is unfinished after the timeout expires. |
| 1052 | * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is |
| 1053 | * pending before the request completes. |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1054 | */ |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1055 | long i915_wait_request(struct drm_i915_gem_request *req, |
| 1056 | unsigned int flags, |
| 1057 | long timeout) |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1058 | { |
Chris Wilson | ea746f3 | 2016-09-09 14:11:49 +0100 | [diff] [blame] | 1059 | const int state = flags & I915_WAIT_INTERRUPTIBLE ? |
| 1060 | TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; |
Chris Wilson | 4b36b2e | 2017-02-23 07:44:10 +0000 | [diff] [blame] | 1061 | wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue; |
Chris Wilson | a49625f | 2017-02-23 07:44:19 +0000 | [diff] [blame] | 1062 | DEFINE_WAIT_FUNC(reset, default_wake_function); |
| 1063 | DEFINE_WAIT_FUNC(exec, default_wake_function); |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1064 | struct intel_wait wait; |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1065 | |
| 1066 | might_sleep(); |
Chris Wilson | 22dd3bb | 2016-09-09 14:11:50 +0100 | [diff] [blame] | 1067 | #if IS_ENABLED(CONFIG_LOCKDEP) |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1068 | GEM_BUG_ON(debug_locks && |
| 1069 | !!lockdep_is_held(&req->i915->drm.struct_mutex) != |
Chris Wilson | 22dd3bb | 2016-09-09 14:11:50 +0100 | [diff] [blame] | 1070 | !!(flags & I915_WAIT_LOCKED)); |
| 1071 | #endif |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1072 | GEM_BUG_ON(timeout < 0); |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1073 | |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1074 | if (i915_gem_request_completed(req)) |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1075 | return timeout; |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1076 | |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1077 | if (!timeout) |
| 1078 | return -ETIME; |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1079 | |
Tvrtko Ursulin | 9369250 | 2017-02-21 11:00:24 +0000 | [diff] [blame] | 1080 | trace_i915_gem_request_wait_begin(req, flags); |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1081 | |
Chris Wilson | a49625f | 2017-02-23 07:44:19 +0000 | [diff] [blame] | 1082 | add_wait_queue(&req->execute, &exec); |
Chris Wilson | 7de53bf | 2017-02-23 07:44:11 +0000 | [diff] [blame] | 1083 | if (flags & I915_WAIT_LOCKED) |
| 1084 | add_wait_queue(errq, &reset); |
| 1085 | |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 1086 | intel_wait_init(&wait); |
| 1087 | |
Chris Wilson | d6a2289 | 2017-02-23 07:44:17 +0000 | [diff] [blame] | 1088 | restart: |
Chris Wilson | 0f2f61d | 2017-02-23 07:44:22 +0000 | [diff] [blame^] | 1089 | do { |
| 1090 | set_current_state(state); |
| 1091 | if (intel_wait_update_request(&wait, req)) |
| 1092 | break; |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 1093 | |
Chris Wilson | 0f2f61d | 2017-02-23 07:44:22 +0000 | [diff] [blame^] | 1094 | if (flags & I915_WAIT_LOCKED && |
| 1095 | __i915_wait_request_check_and_reset(req)) |
| 1096 | continue; |
Chris Wilson | 541ca6e | 2017-02-23 07:44:12 +0000 | [diff] [blame] | 1097 | |
Chris Wilson | 0f2f61d | 2017-02-23 07:44:22 +0000 | [diff] [blame^] | 1098 | if (signal_pending_state(state, current)) { |
| 1099 | timeout = -ERESTARTSYS; |
| 1100 | goto complete; |
| 1101 | } |
Chris Wilson | 541ca6e | 2017-02-23 07:44:12 +0000 | [diff] [blame] | 1102 | |
Chris Wilson | 0f2f61d | 2017-02-23 07:44:22 +0000 | [diff] [blame^] | 1103 | if (!timeout) { |
| 1104 | timeout = -ETIME; |
| 1105 | goto complete; |
| 1106 | } |
Chris Wilson | 541ca6e | 2017-02-23 07:44:12 +0000 | [diff] [blame] | 1107 | |
Chris Wilson | 0f2f61d | 2017-02-23 07:44:22 +0000 | [diff] [blame^] | 1108 | timeout = io_schedule_timeout(timeout); |
| 1109 | } while (1); |
Chris Wilson | 541ca6e | 2017-02-23 07:44:12 +0000 | [diff] [blame] | 1110 | |
Chris Wilson | 0f2f61d | 2017-02-23 07:44:22 +0000 | [diff] [blame^] | 1111 | GEM_BUG_ON(!intel_wait_has_seqno(&wait)); |
Chris Wilson | fe49789 | 2017-02-23 07:44:13 +0000 | [diff] [blame] | 1112 | GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit)); |
Chris Wilson | 4680816b | 2016-10-28 13:58:48 +0100 | [diff] [blame] | 1113 | |
Daniel Vetter | 437c308 | 2016-08-05 18:11:24 +0200 | [diff] [blame] | 1114 | /* Optimistic short spin before touching IRQs */ |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1115 | if (i915_spin_request(req, state, 5)) |
| 1116 | goto complete; |
| 1117 | |
| 1118 | set_current_state(state); |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1119 | if (intel_engine_add_wait(req->engine, &wait)) |
| 1120 | /* In order to check that we haven't missed the interrupt |
| 1121 | * as we enabled it, we need to kick ourselves to do a |
| 1122 | * coherent check on the seqno before we sleep. |
| 1123 | */ |
| 1124 | goto wakeup; |
| 1125 | |
Chris Wilson | 24f417e | 2017-02-23 07:44:21 +0000 | [diff] [blame] | 1126 | if (flags & I915_WAIT_LOCKED) |
| 1127 | __i915_wait_request_check_and_reset(req); |
| 1128 | |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1129 | for (;;) { |
| 1130 | if (signal_pending_state(state, current)) { |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1131 | timeout = -ERESTARTSYS; |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1132 | break; |
| 1133 | } |
| 1134 | |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1135 | if (!timeout) { |
| 1136 | timeout = -ETIME; |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1137 | break; |
| 1138 | } |
| 1139 | |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1140 | timeout = io_schedule_timeout(timeout); |
| 1141 | |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 1142 | if (intel_wait_complete(&wait) && |
| 1143 | intel_wait_check_request(&wait, req)) |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1144 | break; |
| 1145 | |
| 1146 | set_current_state(state); |
| 1147 | |
| 1148 | wakeup: |
| 1149 | /* Carefully check if the request is complete, giving time |
| 1150 | * for the seqno to be visible following the interrupt. |
| 1151 | * We also have to check in case we are kicked by the GPU |
| 1152 | * reset in order to drop the struct_mutex. |
| 1153 | */ |
| 1154 | if (__i915_request_irq_complete(req)) |
| 1155 | break; |
| 1156 | |
Chris Wilson | 221fe79 | 2016-09-09 14:11:51 +0100 | [diff] [blame] | 1157 | /* If the GPU is hung, and we hold the lock, reset the GPU |
| 1158 | * and then check for completion. On a full reset, the engine's |
| 1159 | * HW seqno will be advanced past us and we are complete. |
| 1160 | * If we do a partial reset, we have to wait for the GPU to |
| 1161 | * resume and update the breadcrumb. |
| 1162 | * |
| 1163 | * If we don't hold the mutex, we can just wait for the worker |
| 1164 | * to come along and update the breadcrumb (either directly |
| 1165 | * itself, or indirectly by recovering the GPU). |
| 1166 | */ |
| 1167 | if (flags & I915_WAIT_LOCKED && |
Chris Wilson | e070511 | 2017-02-23 07:44:20 +0000 | [diff] [blame] | 1168 | __i915_wait_request_check_and_reset(req)) |
Chris Wilson | 221fe79 | 2016-09-09 14:11:51 +0100 | [diff] [blame] | 1169 | continue; |
Chris Wilson | 221fe79 | 2016-09-09 14:11:51 +0100 | [diff] [blame] | 1170 | |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1171 | /* Only spin if we know the GPU is processing this request */ |
| 1172 | if (i915_spin_request(req, state, 2)) |
| 1173 | break; |
Chris Wilson | d6a2289 | 2017-02-23 07:44:17 +0000 | [diff] [blame] | 1174 | |
| 1175 | if (!intel_wait_check_request(&wait, req)) { |
| 1176 | intel_engine_remove_wait(req->engine, &wait); |
| 1177 | goto restart; |
| 1178 | } |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1179 | } |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1180 | |
| 1181 | intel_engine_remove_wait(req->engine, &wait); |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1182 | complete: |
Chris Wilson | a49625f | 2017-02-23 07:44:19 +0000 | [diff] [blame] | 1183 | __set_current_state(TASK_RUNNING); |
Chris Wilson | 7de53bf | 2017-02-23 07:44:11 +0000 | [diff] [blame] | 1184 | if (flags & I915_WAIT_LOCKED) |
| 1185 | remove_wait_queue(errq, &reset); |
Chris Wilson | a49625f | 2017-02-23 07:44:19 +0000 | [diff] [blame] | 1186 | remove_wait_queue(&req->execute, &exec); |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1187 | trace_i915_gem_request_wait_end(req); |
| 1188 | |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 1189 | return timeout; |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 1190 | } |
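/*
 * Hypothetical caller sketch (example_wait_request_ms() is invented for
 * illustration): wait up to @ms milliseconds while holding struct_mutex,
 * mapping the documented return values onto an errno-style result.
 */
static int example_wait_request_ms(struct drm_i915_gem_request *req,
				   unsigned int ms)
{
	long remaining;

	remaining = i915_wait_request(req,
				      I915_WAIT_LOCKED |
				      I915_WAIT_INTERRUPTIBLE,
				      msecs_to_jiffies(ms));
	if (remaining < 0)
		return remaining;	/* -ETIME, or interrupted by a signal */

	return 0;			/* completed with @remaining jiffies to spare */
}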
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1191 | |
Chris Wilson | 28176ef | 2016-10-28 13:58:56 +0100 | [diff] [blame] | 1192 | static void engine_retire_requests(struct intel_engine_cs *engine) |
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1193 | { |
| 1194 | struct drm_i915_gem_request *request, *next; |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 1195 | u32 seqno = intel_engine_get_seqno(engine); |
| 1196 | LIST_HEAD(retire); |
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1197 | |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 1198 | spin_lock_irq(&engine->timeline->lock); |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 1199 | list_for_each_entry_safe(request, next, |
| 1200 | &engine->timeline->requests, link) { |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 1201 | if (!i915_seqno_passed(seqno, request->global_seqno)) |
| 1202 | break; |
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1203 | |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 1204 | list_move_tail(&request->link, &retire); |
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1205 | } |
Chris Wilson | 754c9fd | 2017-02-23 07:44:14 +0000 | [diff] [blame] | 1206 | spin_unlock_irq(&engine->timeline->lock); |
| 1207 | |
| 1208 | list_for_each_entry_safe(request, next, &retire, link) |
| 1209 | i915_gem_request_retire(request); |
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1210 | } |
| 1211 | |
| 1212 | void i915_gem_retire_requests(struct drm_i915_private *dev_priv) |
| 1213 | { |
| 1214 | struct intel_engine_cs *engine; |
Chris Wilson | 28176ef | 2016-10-28 13:58:56 +0100 | [diff] [blame] | 1215 | enum intel_engine_id id; |
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1216 | |
| 1217 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
| 1218 | |
Chris Wilson | 28176ef | 2016-10-28 13:58:56 +0100 | [diff] [blame] | 1219 | if (!dev_priv->gt.active_requests) |
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1220 | return; |
| 1221 | |
Chris Wilson | 28176ef | 2016-10-28 13:58:56 +0100 | [diff] [blame] | 1222 | for_each_engine(engine, dev_priv, id) |
| 1223 | engine_retire_requests(engine); |
Chris Wilson | 4b8de8e | 2016-08-04 07:52:42 +0100 | [diff] [blame] | 1224 | } |
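/*
 * Illustrative only: retirement must be driven under struct_mutex (see the
 * lockdep assertion above). A caller sketch with an invented helper name:
 */
static void example_retire_all(struct drm_i915_private *i915)
{
	mutex_lock(&i915->drm.struct_mutex);
	i915_gem_retire_requests(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}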
Chris Wilson | c835c55 | 2017-02-13 17:15:21 +0000 | [diff] [blame] | 1225 | |
| 1226 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
| 1227 | #include "selftests/mock_request.c" |
| 1228 | #include "selftests/i915_gem_request.c" |
| 1229 | #endif |