/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	struct i915_request *request;
	u32 seqno;
};

struct intel_signal_node {
	struct intel_wait wait;
	struct list_head link;
};

struct i915_dependency {
	struct i915_priotree *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};

/*
 * "People assume that time is a strict progression of cause to effect, but
 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 *
 * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 */
struct i915_priotree {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	int priority;
};

enum {
	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,

	I915_PRIORITY_INVALID = INT_MIN
};

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The i915 device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request.
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	struct intel_signal_node signaling;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	wait_queue_entry_t submitq;
	wait_queue_head_t execute;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_priotree priotree;
	struct i915_dependency dep;

	/**
	 * GEM sequence number associated with this request on the
	 * global execution timeline. It is zero when the request is not
	 * on the HW queue (i.e. not on the engine timeline list).
	 * Its value is guarded by the timeline spinlock.
	 */
	u32 global_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request, if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	bool waitboost;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
i915_request_alloc(struct intel_engine_cs *engine,
		   struct i915_gem_context *ctx);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}
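
/*
 * Illustrative sketch only (example_as_i915_request() is hypothetical, not
 * part of the driver): a generic struct dma_fence may only be downcast to
 * an i915_request after checking that its ops match i915_fence_ops. Fences
 * originating from other drivers must be treated as opaque.
 *
 *	static struct i915_request *example_as_i915_request(struct dma_fence *fence)
 *	{
 *		if (!dma_fence_is_i915(fence))
 *			return NULL;
 *
 *		return to_request(fence);
 *	}
 */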

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

/**
 * i915_request_global_seqno - report the current global seqno
 * @request - the request
 *
 * A request is assigned a global seqno only when it is on the hardware
 * execution queue. The global seqno can be used to maintain a list of
 * requests on the same engine in retirement order, for example for
 * constructing a priority queue for waiting. Prior to its execution, or
 * if it is subsequently removed in the event of preemption, its global
 * seqno is zero. As both insertion and removal from the execution queue
 * may operate in IRQ context, it is not guarded by the usual struct_mutex
 * BKL. Instead those relying on the global seqno must be prepared for its
 * value to change between reads. Only when the request is complete can
 * the global seqno be stable (due to the memory barriers on submitting
 * the commands to the hardware to write the breadcrumb: if the HWS shows
 * that it has passed the global seqno and the global seqno is unchanged
 * after the read, it is indeed complete).
 */
static inline u32
i915_request_global_seqno(const struct i915_request *request)
{
	return READ_ONCE(request->global_seqno);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);

void __i915_request_add(struct i915_request *rq, bool flush_caches);
#define i915_request_add(rq) \
	__i915_request_add(rq, false)
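
/*
 * Typical construction flow, shown as an illustrative sketch (engine, ctx,
 * in_fence and emit_commands() are placeholders supplied by the caller; this
 * is not a verbatim excerpt from the driver): allocate a request for a
 * context on an engine, declare its dependencies with the await helpers,
 * emit commands into the ring, then commit it with i915_request_add().
 * After that point the request is owned by the engine and may only be
 * referenced via i915_request_get()/i915_request_put().
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *	if (err == 0)
 *		err = emit_commands(rq);
 *
 *	i915_request_add(rq);
 *	return err;
 */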
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 290 | |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 291 | void __i915_request_submit(struct i915_request *request); |
| 292 | void i915_request_submit(struct i915_request *request); |
Chris Wilson | d55ac5b | 2016-11-14 20:40:59 +0000 | [diff] [blame] | 293 | |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 294 | void __i915_request_unsubmit(struct i915_request *request); |
| 295 | void i915_request_unsubmit(struct i915_request *request); |
Chris Wilson | d6a2289 | 2017-02-23 07:44:17 +0000 | [diff] [blame] | 296 | |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 297 | long i915_request_wait(struct i915_request *rq, |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 298 | unsigned int flags, |
| 299 | long timeout) |
Chris Wilson | fa545cb | 2016-08-04 07:52:35 +0100 | [diff] [blame] | 300 | __attribute__((nonnull(1))); |
Chris Wilson | 22dd3bb | 2016-09-09 14:11:50 +0100 | [diff] [blame] | 301 | #define I915_WAIT_INTERRUPTIBLE BIT(0) |
| 302 | #define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 303 | #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ |
Chris Wilson | fa545cb | 2016-08-04 07:52:35 +0100 | [diff] [blame] | 304 | |
Chris Wilson | 05235c5 | 2016-07-20 09:21:08 +0100 | [diff] [blame] | 305 | static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); |
| 306 | |
| 307 | /** |
| 308 | * Returns true if seq1 is later than seq2. |
| 309 | */ |
| 310 | static inline bool i915_seqno_passed(u32 seq1, u32 seq2) |
| 311 | { |
| 312 | return (s32)(seq1 - seq2) >= 0; |
| 313 | } |
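
/*
 * The signed subtraction keeps the comparison correct across the 32-bit
 * seqno wrapping around, provided the two values are within 2^31 of each
 * other. A few worked examples (values purely illustrative):
 *
 *	i915_seqno_passed(5, 3)          -> true   (5 - 3 == 2)
 *	i915_seqno_passed(3, 5)          -> false  (3 - 5 == -2 as s32)
 *	i915_seqno_passed(2, 0xfffffffe) -> true   (2 - 0xfffffffe == 4 as s32)
 */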

static inline bool
__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
	GEM_BUG_ON(!seqno);
	return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
		seqno == i915_request_global_seqno(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return __i915_request_completed(rq, seqno);
}

static inline bool i915_request_started(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
				 seqno - 1);
}

static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
{
	const struct i915_request *rq =
		container_of(pt, const struct i915_request, priotree);

	return i915_request_completed(rq);
}

void i915_retire_requests(struct drm_i915_private *i915);

/*
 * We treat requests as fences. This is not to be confused with our
 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU, for example, we should not rewrite an object's PTEs whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct i915_request *);

struct i915_gem_active {
	struct i915_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct i915_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *           can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, i.e. when it is
 * retired after completion, the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}
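
/*
 * Illustrative sketch of embedding an activity tracker (all names here are
 * hypothetical, not taken from the driver): initialise the tracker once with
 * an optional retire callback, then point it at each request that uses the
 * object while that request is being constructed under struct_mutex.
 *
 *	struct example_obj {
 *		struct i915_gem_active last_use;
 *	};
 *
 *	static void example_retire(struct i915_gem_active *active,
 *				   struct i915_request *rq)
 *	{
 *		...
 *	}
 *
 *	init_request_active(&obj->last_use, example_retire);
 *	...
 *	i915_gem_active_set(&obj->last_use, rq);
 */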

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct i915_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct mutex *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/*
	 * Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer; in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct i915_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request,
 * or NULL if the active tracker is idle. The caller must hold the RCU read
 * lock, but the returned pointer is safe to use outside of RCU.
 */
static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/*
	 * Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 *	Thread A			Thread B
	 *
	 *	rq = active.request
	 *					retire(rq) -> free(rq);
	 *					(rq is now first on the slab freelist)
	 *					active.request = NULL
	 *
	 *					rq = new submission on a new object
	 *					ref(rq)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_request_alloc().
	 */
	do {
		struct i915_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_request_completed(request))
			return NULL;

		/*
		 * An especially silly compiler could decide to recompute the
		 * result of i915_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_request_get_rcu, but not executing
		 * that while still executing i915_request_put() creates
		 * havoc enough. Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_request_get_rcu(request);

		/*
		 * What stops the following rcu_access_pointer() from occurring
		 * before the above i915_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_request_get_rcu()
		 * returns the request (and so with the reference count
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_request_put().
 */
static inline struct i915_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct i915_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}
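
/*
 * Illustrative sketch (obj->last_use is a hypothetical tracker): take a
 * reference without holding struct_mutex, operate on the request, then drop
 * the reference. i915_gem_active_wait() below wraps exactly this pattern.
 *
 *	rq = i915_gem_active_get_unlocked(&obj->last_use);
 *	if (rq) {
 *		timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *					    MAX_SCHEDULE_TIMEOUT);
 *		i915_request_put(rq);
 *	}
 */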

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

| 664 | /** |
Chris Wilson | d07f0e5 | 2016-10-28 13:58:44 +0100 | [diff] [blame] | 665 | * i915_gem_active_wait - waits until the request is completed |
Chris Wilson | 2467658 | 2016-08-05 10:14:06 +0100 | [diff] [blame] | 666 | * @active - the active request on which to wait |
Chris Wilson | ea746f3 | 2016-09-09 14:11:49 +0100 | [diff] [blame] | 667 | * @flags - how to wait |
Chris Wilson | 2467658 | 2016-08-05 10:14:06 +0100 | [diff] [blame] | 668 | * @timeout - how long to wait at most |
| 669 | * @rps - userspace client to charge for a waitboost |
| 670 | * |
Chris Wilson | 2e36991 | 2016-10-28 13:58:28 +0100 | [diff] [blame] | 671 | * i915_gem_active_wait() waits until the request is completed before |
Chris Wilson | 2467658 | 2016-08-05 10:14:06 +0100 | [diff] [blame] | 672 | * returning, without requiring any locks to be held. Note that it does not |
| 673 | * retire any requests before returning. |
| 674 | * |
| 675 | * This function relies on RCU in order to acquire the reference to the active |
| 676 | * request without holding any locks. See __i915_gem_active_get_rcu() for the |
| 677 | * glory details on how that is managed. Once the reference is acquired, we |
| 678 | * can then wait upon the request, and afterwards release our reference, |
| 679 | * free of any locking. |
| 680 | * |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 681 | * This function wraps i915_request_wait(), see it for the full details on |
Chris Wilson | 2467658 | 2016-08-05 10:14:06 +0100 | [diff] [blame] | 682 | * the arguments. |
| 683 | * |
| 684 | * Returns 0 if successful, or a negative error code. |
| 685 | */ |
| 686 | static inline int |
Chris Wilson | 2e36991 | 2016-10-28 13:58:28 +0100 | [diff] [blame] | 687 | i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags) |
Chris Wilson | 2467658 | 2016-08-05 10:14:06 +0100 | [diff] [blame] | 688 | { |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 689 | struct i915_request *request; |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 690 | long ret = 0; |
Chris Wilson | 2467658 | 2016-08-05 10:14:06 +0100 | [diff] [blame] | 691 | |
| 692 | request = i915_gem_active_get_unlocked(active); |
| 693 | if (request) { |
Chris Wilson | e61e0f5 | 2018-02-21 09:56:36 +0000 | [diff] [blame] | 694 | ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT); |
| 695 | i915_request_put(request); |
Chris Wilson | 2467658 | 2016-08-05 10:14:06 +0100 | [diff] [blame] | 696 | } |
| 697 | |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 698 | return ret < 0 ? ret : 0; |
Chris Wilson | 2467658 | 2016-08-05 10:14:06 +0100 | [diff] [blame] | 699 | } |

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active tracker to wait upon
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct i915_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_request_wait(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))

#endif /* I915_REQUEST_H */