/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
	/* Timelines are bound by eviction to a VM. However, since
	 * we only have a global seqno at the moment, we only have
	 * a single timeline. Note that each timeline will have
	 * multiple execution contexts (fence contexts) as we allow
	 * engines within a single timeline to execute in parallel.
	 */
	return "global";
}

static bool i915_fence_signaled(struct fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

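/* Glue between the jiffies-based struct fence_ops.wait() contract and the
 * nanosecond-based i915_wait_request(): convert the timeout to nanoseconds
 * for the wait, then translate the remaining time back into jiffies.
 */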
static signed long i915_fence_wait(struct fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
{
	s64 timeout_ns, *timeout;
	int ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
		timeout = &timeout_ns;
	} else {
		timeout = NULL;
	}

	ret = i915_wait_request(to_request(fence),
				interruptible, timeout,
				NO_WAITBOOST);
	if (ret == -ETIME)
		return 0;

	if (ret < 0)
		return ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
		timeout_jiffies = nsecs_to_jiffies(timeout_ns);

	return timeout_jiffies;
}

static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
	snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
					  int size)
{
	snprintf(str, size, "%u",
		 intel_engine_get_seqno(to_request(fence)->engine));
}

static void i915_fence_release(struct fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
	.fence_value_str = i915_fence_value_str,
	.timeline_value_str = i915_fence_timeline_value_str,
};

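/* Track the request on the submitting client's list so that it can be
 * throttled against, and unlinked should the file be closed before the
 * request is retired.
 */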
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	req->pid = get_pid(task_pid(current));

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	put_pid(request->pid);
	request->pid = NULL;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

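/* Called once the GPU has finished with a request: unlink it from the engine
 * and ring lists, run the ->retire() callback of every i915_gem_active that
 * was tracking it, and release the references it holds.
 */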
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	trace_i915_gem_request_retire(request);
	list_del(&request->link);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known
	 * position of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks, or when focusing upon time inside the
		 * kernel, we may spend an inordinate amount of time simply
		 * handling the retirement of requests and processing their
		 * callbacks. This loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);
	i915_gem_request_put(request);
}

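/* Retire @req and every older request queued on the same engine. */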
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(list_empty(&req->link));

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

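/* Translate the current reset state into an errno: a terminal wedge is
 * always -EIO; a reset in progress is -EAGAIN for callers that can back
 * off and retry, or -EIO for those that cannot.
 */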
static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
{
	if (__i915_terminally_wedged(reset_counter))
		return -EIO;

	if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

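/* Prepare to (re)set the global seqno: idle the GPU so that no requests
 * remain outstanding, flush the breadcrumb trees if the seqno is moving
 * backwards (i.e. wrapping), and finally program the new value into every
 * engine.
 */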
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine, true);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_engine_init_seqno(engine, seqno);

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to hold a value one less than the seqno
	 * we will next inject into the ring.
	 */
	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
	if (ret)
		return ret;

	dev_priv->next_seqno = seqno;
	return 0;
}

static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
	/* reserve 0 for non-seqno */
	if (unlikely(dev_priv->next_seqno == 0)) {
		int ret;

		ret = i915_gem_init_seqno(dev_priv, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->next_seqno++;
	return 0;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	struct drm_i915_gem_request *req;
	u32 seqno;
	int ret;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->request_list,
				       typeof(*req), link);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->fence.seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with fence_init(). This increment is safe for release as we check
	 * that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_get_seqno(dev_priv, &seqno);
	if (ret)
		goto err;

	spin_lock_init(&req->lock);
	fence_init(&req->fence,
		   &i915_fence_ops,
		   &req->lock,
		   engine->fence_context,
		   seqno);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/* No zalloc, must clear what we need by hand */
	req->previous_context = NULL;
	req->file_priv = NULL;
	req->batch_obj = NULL;
	req->pid = NULL;
	req->elsp_submitted = 0;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ERR_PTR(ret);
}

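/* Mark the device as busy: take a runtime-pm wakeref for as long as requests
 * are in flight, raise the GPU frequency for the incoming workload, and
 * schedule the retirement worker.
 */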
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = ring->tail;
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		ret = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
	}

	trace_i915_gem_request_add(request);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_submitted_seqno;
	engine->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&engine->last_request, request);
	list_add_tail(&request->link, &engine->request_list);
	list_add_tail(&request->ring_link, &ring->request_list);

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	request->postfix = ring->tail;

	/* Not allowed to fail! */
	ret = engine->emit_request(request);
	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

	/* Sanity check that the reserved size was large enough. */
	ret = ring->tail - request_start;
	if (ret < 0)
		ret += ring->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);

	i915_gem_mark_busy(engine);
	engine->submit_request(request);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined with respect to the current
	 * CPU; the comparisons are no longer valid if we switch CPUs. Instead
	 * of blocking preemption for the entire busywait, we can detect the
	 * CPU switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

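/* Stop spinning once the timeout expires, or once we have migrated to a
 * different CPU (which both invalidates the local_clock() comparison and
 * suggests the system is busy enough that we should yield).
 */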
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

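/* Busywait for up to @timeout_us microseconds for the request to complete,
 * returning true if it did; the caller falls back to an interrupt-driven
 * wait otherwise.
 */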
bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed-in seqno and reset_counter
 * values have been read by the caller in an SMP-safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request completed within the allotted time. Else returns
 * the errno with remaining time filled in timeout argument.
 */
int i915_wait_request(struct drm_i915_gem_request *req,
		      bool interruptible,
		      s64 *timeout,
		      struct intel_rps_client *rps)
{
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;
	unsigned long timeout_remain;
	int ret = 0;

	might_sleep();

	if (i915_gem_request_completed(req))
		return 0;

	timeout_remain = MAX_SCHEDULE_TIMEOUT;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		/* Record current time in case interrupted, or wedged */
		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
		*timeout += ktime_get_raw_ns();
	}

	trace_i915_gem_request_wait_begin(req);

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->fence.seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout_remain = io_schedule_timeout(timeout_remain);
		if (timeout_remain == 0) {
			ret = -ETIME;
			break;
		}

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}
	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_engine_remove_wait(req->engine, &wait);
	__set_current_state(TASK_RUNNING);
complete:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		*timeout -= ktime_get_raw_ns();
		if (*timeout < 0)
			*timeout = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has
		 * a bit of mismatch in the jiffies<->nsecs<->ktime loop. So
		 * patch things up to make the test happy. We allow up to 1
		 * jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	if (IS_RPS_USER(rps) &&
	    req->fence.seqno == req->engine->last_submitted_seqno) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&req->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&req->i915->rps.client_lock);
	}

	return ret;
}

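/* Retire, in submission order, every completed request on this engine,
 * stopping at the first request still busy on the GPU.
 */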
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next, &engine->request_list, link) {
		if (!i915_gem_request_completed(request))
			break;

		i915_gem_request_retire(request);
	}
}

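/* Retire completed requests across all engines; if that leaves the GPU with
 * no active engines, queue the idle worker to park the hardware.
 */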
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (dev_priv->gt.active_engines == 0)
		return;

	GEM_BUG_ON(!dev_priv->gt.awake);

	for_each_engine(engine, dev_priv) {
		engine_retire_requests(engine);
		if (!intel_engine_is_active(engine))
			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
	}

	if (dev_priv->gt.active_engines == 0)
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.idle_work,
				   msecs_to_jiffies(100));
}