/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except
 * on gen5). Though something like a context does exist for the media ring,
 * the code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context, in order to invoke a save of the context we
 * actually care about. In fact, the code could likely be constructed, albeit
 * in a more complicated fashion, to never use the default context, though
 * that limits the driver's ability to swap out, and/or destroy other
 * contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to avoid re-emitting
 * state (and potentially to query certain state) at any time. The kernel
 * driver makes certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context while it is still active.
 *
 */
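
/*
 * Illustrative sketch, not part of the driver: the userspace side of the
 * transitions above, using the context uapi from i915_drm.h and libdrm's
 * drmIoctl(). Error handling is elided, and "execbuf" is assumed to be an
 * otherwise fully populated struct drm_i915_gem_execbuffer2.
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);   (S0->S1)
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);     (S1->S2)
 *
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy); (S2->S4)
 */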

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; /* fields count 64 byte cache lines */
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

static void i915_gem_context_clean(struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct i915_vma *vma, *next;

	if (!ppgtt)
		return;

	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 mm_list) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	/*
	 * This context is going away and we need to remove all VMAs still
	 * around. This is to handle imported shared objects for which the
	 * destructor did not run when their handles were closed.
	 */
	i915_gem_context_clean(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. With two L3 slices, for example,
	 * the initial mask is 0x3. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as for the idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			intel_lr_context_reset(dev, ctx);
		}

		return;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}

		/* Force the GPU state to be reinitialised on enabling */
		if (ring->default_context)
			ring->default_context->legacy_hw_ctx.initialized = false;
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 i915.enable_execlists ? "LR" :
			 dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (i915.enable_execlists) {
		if (ring->init_context == NULL)
			return 0;

		ret = ring->init_context(req);
	} else
		ret = i915_switch_context(req);

	if (ret) {
		DRM_ERROR("ring init context: %d\n", ret);
		return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}
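
/*
 * For reference, a sketch of the stream mi_set_context() emits on gen7 with
 * one other enabled ring (num_rings == 1); illustrative only, derived from
 * the code above rather than from any spec:
 *
 *	MI_ARB_ON_OFF | MI_ARB_DISABLE
 *	MI_LOAD_REGISTER_IMM(1)
 *	  RING_PSMI_CTL(signaller); _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)
 *	MI_NOOP
 *	MI_SET_CONTEXT
 *	  ggtt offset of rcs_state | flags
 *	MI_NOOP					(WaMiSetContext_Hang)
 *	MI_LOAD_REGISTER_IMM(1)
 *	  RING_PSMI_CTL(signaller); _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)
 *	MI_ARB_ON_OFF | MI_ARB_ENABLE
 *
 * which accounts for the len = 4 + 2 + (4*1 + 2) = 12 dwords reserved above.
 */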

static inline bool should_skip_switch(struct intel_engine_cs *ring,
				      struct intel_context *from,
				      struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
}

static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (INTEL_INFO(ring->dev)->gen < 8)
		return true;

	if (ring != &dev_priv->ring[RCS])
		return true;

	return false;
}

static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
		   u32 hw_flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(ring->dev))
		return false;

	if (ring != &dev_priv->ring[RCS])
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}
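
/*
 * Taken together, the two predicates above give this page-directory reload
 * behaviour (a summary of the code above for the legacy path, assuming the
 * context has a full PPGTT; with to->ppgtt == NULL neither load happens):
 *
 *	gen	ring	pre MI_SET_CONTEXT	post MI_SET_CONTEXT
 *	6/7	any	yes			no
 *	8	!RCS	yes			no
 *	8	RCS	no			only if restore inhibited
 */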

static int do_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (should_skip_switch(ring, from, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (needs_pd_load_pre(ring, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		if (ret)
			goto unpin_out;

		/* Doing a PD load always reloads the page dirs */
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
		hw_flags |= MI_RESTORE_INHIBIT;
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occurs in that case. */
	} else if (to->ppgtt &&
		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	/* We should never emit switch_mm more than once */
	WARN_ON(needs_pd_load_pre(ring, to) &&
		needs_pd_load_post(ring, to, hw_flags));

	ret = mi_set_context(req, hw_flags);
	if (ret)
		goto unpin_out;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret) {
			DRM_ERROR("Failed to change address space on context switch\n");
			goto unpin_out;
		}
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(req, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(req);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}
| 816 | |
| 817 | /** |
| 818 | * i915_switch_context() - perform a GPU context switch. |
John Harrison | ba01cc9 | 2015-05-29 17:43:41 +0100 | [diff] [blame] | 819 | * @req: request for which we'll execute the context switch |
Ben Widawsky | e055684 | 2012-06-04 14:42:46 -0700 | [diff] [blame] | 820 | * |
| 821 | * The context life cycle is simple. The context refcount is incremented and |
| 822 | * decremented by 1 and create and destroy. If the context is in use by the GPU, |
Thomas Daniel | ecdb5fd | 2014-08-20 16:29:24 +0100 | [diff] [blame] | 823 | * it will have a refcount > 1. This allows us to destroy the context abstract |
Ben Widawsky | e055684 | 2012-06-04 14:42:46 -0700 | [diff] [blame] | 824 | * object while letting the normal object tracking destroy the backing BO. |
Thomas Daniel | ecdb5fd | 2014-08-20 16:29:24 +0100 | [diff] [blame] | 825 | * |
| 826 | * This function should not be used in execlists mode. Instead the context is |
| 827 | * switched by writing to the ELSP and requests keep a reference to their |
| 828 | * context. |
Ben Widawsky | e055684 | 2012-06-04 14:42:46 -0700 | [diff] [blame] | 829 | */ |
John Harrison | ba01cc9 | 2015-05-29 17:43:41 +0100 | [diff] [blame] | 830 | int i915_switch_context(struct drm_i915_gem_request *req) |
Ben Widawsky | e055684 | 2012-06-04 14:42:46 -0700 | [diff] [blame] | 831 | { |
John Harrison | ba01cc9 | 2015-05-29 17:43:41 +0100 | [diff] [blame] | 832 | struct intel_engine_cs *ring = req->ring; |
Ben Widawsky | e055684 | 2012-06-04 14:42:46 -0700 | [diff] [blame] | 833 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
Ben Widawsky | e055684 | 2012-06-04 14:42:46 -0700 | [diff] [blame] | 834 | |
Thomas Daniel | ecdb5fd | 2014-08-20 16:29:24 +0100 | [diff] [blame] | 835 | WARN_ON(i915.enable_execlists); |
Ben Widawsky | 0eea67e | 2013-12-06 14:11:19 -0800 | [diff] [blame] | 836 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
| 837 | |
John Harrison | ba01cc9 | 2015-05-29 17:43:41 +0100 | [diff] [blame] | 838 | if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ |
| 839 | if (req->ctx != ring->last_context) { |
| 840 | i915_gem_context_reference(req->ctx); |
Chris Wilson | 691e641 | 2014-04-09 09:07:36 +0100 | [diff] [blame] | 841 | if (ring->last_context) |
| 842 | i915_gem_context_unreference(ring->last_context); |
John Harrison | ba01cc9 | 2015-05-29 17:43:41 +0100 | [diff] [blame] | 843 | ring->last_context = req->ctx; |
Chris Wilson | 691e641 | 2014-04-09 09:07:36 +0100 | [diff] [blame] | 844 | } |
Ben Widawsky | c482972 | 2013-12-06 14:11:20 -0800 | [diff] [blame] | 845 | return 0; |
Mika Kuoppala | a95f6a0 | 2014-03-14 16:22:10 +0200 | [diff] [blame] | 846 | } |
Ben Widawsky | c482972 | 2013-12-06 14:11:20 -0800 | [diff] [blame] | 847 | |
John Harrison | abd68d9 | 2015-05-29 17:43:42 +0100 | [diff] [blame] | 848 | return do_switch(req); |
Ben Widawsky | e055684 | 2012-06-04 14:42:46 -0700 | [diff] [blame] | 849 | } |
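
/*
 * Illustrative sketch only (not part of the original file): how a legacy
 * (non-execlists) submission path is expected to drive the switch above.
 * The helper emit_batch() is hypothetical; in the real driver this
 * sequencing lives in the execbuffer path.
 */
#if 0
static int submit_with_context(struct drm_i915_gem_request *req)
{
        int ret;

        /* Make req->ctx current on req->ring; a no-op if it already is. */
        ret = i915_switch_context(req);
        if (ret)
                return ret;

        /*
         * ring->last_context now holds a reference to req->ctx, so the
         * context object outlives a concurrent CONTEXT_DESTROY from
         * userspace until some other context is switched in.
         */
        return emit_batch(req);         /* hypothetical helper */
}
#endif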
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 850 | |
Oscar Mateo | ec3e996 | 2014-07-24 17:04:18 +0100 | [diff] [blame] | 851 | static bool contexts_enabled(struct drm_device *dev) |
Chris Wilson | 691e641 | 2014-04-09 09:07:36 +0100 | [diff] [blame] | 852 | { |
Oscar Mateo | ec3e996 | 2014-07-24 17:04:18 +0100 | [diff] [blame] | 853 | return i915.enable_execlists || to_i915(dev)->hw_context_size; |
Chris Wilson | 691e641 | 2014-04-09 09:07:36 +0100 | [diff] [blame] | 854 | } |
| 855 | |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 856 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
| 857 | struct drm_file *file) |
| 858 | { |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 859 | struct drm_i915_gem_context_create *args = data; |
| 860 | struct drm_i915_file_private *file_priv = file->driver_priv; |
Oscar Mateo | 273497e | 2014-05-22 14:13:37 +0100 | [diff] [blame] | 861 | struct intel_context *ctx; |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 862 | int ret; |
| 863 | |
Oscar Mateo | ec3e996 | 2014-07-24 17:04:18 +0100 | [diff] [blame] | 864 | if (!contexts_enabled(dev)) |
Daniel Vetter | 5fa8be6 | 2012-06-19 17:16:01 +0200 | [diff] [blame] | 865 | return -ENODEV; |
| 866 | |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 867 | ret = i915_mutex_lock_interruptible(dev); |
| 868 | if (ret) |
| 869 | return ret; |
| 870 | |
Daniel Vetter | d624d86 | 2014-08-06 15:04:54 +0200 | [diff] [blame] | 871 | ctx = i915_gem_create_context(dev, file_priv); |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 872 | mutex_unlock(&dev->struct_mutex); |
Dan Carpenter | be63638 | 2012-07-17 09:44:49 +0300 | [diff] [blame] | 873 | if (IS_ERR(ctx)) |
| 874 | return PTR_ERR(ctx); |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 875 | |
Oscar Mateo | 821d66d | 2014-07-03 16:28:00 +0100 | [diff] [blame] | 876 | args->ctx_id = ctx->user_handle; |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 877 | DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); |
| 878 | |
Dan Carpenter | be63638 | 2012-07-17 09:44:49 +0300 | [diff] [blame] | 879 | return 0; |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 880 | } |
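
/*
 * Userspace view (illustrative sketch, not part of the original file):
 * creating a context through the uapi declared in i915_drm.h.  Error
 * handling is elided; fd is an open DRM device file descriptor.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static __u32 create_context(int fd)
{
        struct drm_i915_gem_context_create create = { 0 };

        /* On success the kernel writes the new handle into ctx_id. */
        ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
        return create.ctx_id;
}
#endif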
| 881 | |
| 882 | int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, |
| 883 | struct drm_file *file) |
| 884 | { |
| 885 | struct drm_i915_gem_context_destroy *args = data; |
| 886 | struct drm_i915_file_private *file_priv = file->driver_priv; |
Oscar Mateo | 273497e | 2014-05-22 14:13:37 +0100 | [diff] [blame] | 887 | struct intel_context *ctx; |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 888 | int ret; |
| 889 | |
Oscar Mateo | 821d66d | 2014-07-03 16:28:00 +0100 | [diff] [blame] | 890 | if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) |
Ben Widawsky | c2cf241 | 2013-12-24 16:02:54 -0800 | [diff] [blame] | 891 | return -ENOENT; |
Ben Widawsky | 0eea67e | 2013-12-06 14:11:19 -0800 | [diff] [blame] | 892 | |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 893 | ret = i915_mutex_lock_interruptible(dev); |
| 894 | if (ret) |
| 895 | return ret; |
| 896 | |
| 897 | ctx = i915_gem_context_get(file_priv, args->ctx_id); |
Ben Widawsky | 72ad5c4 | 2014-01-02 19:50:27 -1000 | [diff] [blame] | 898 | if (IS_ERR(ctx)) { |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 899 | mutex_unlock(&dev->struct_mutex); |
Ben Widawsky | 72ad5c4 | 2014-01-02 19:50:27 -1000 | [diff] [blame] | 900 | return PTR_ERR(ctx); |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 901 | } |
| 902 | |
Oscar Mateo | 821d66d | 2014-07-03 16:28:00 +0100 | [diff] [blame] | 903 | idr_remove(&ctx->file_priv->context_idr, ctx->user_handle); |
Mika Kuoppala | dce3271 | 2013-04-30 13:30:33 +0300 | [diff] [blame] | 904 | i915_gem_context_unreference(ctx); |
Ben Widawsky | 8462481 | 2012-06-04 14:42:54 -0700 | [diff] [blame] | 905 | mutex_unlock(&dev->struct_mutex); |
| 906 | |
| 907 | DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); |
| 908 | return 0; |
| 909 | } |
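
/*
 * Matching teardown for the sketch above (illustrative only).  Note that
 * DEFAULT_CONTEXT_HANDLE is rejected with -ENOENT, as the default context
 * must live for as long as the file does.
 */
#if 0
static void destroy_context(int fd, __u32 ctx_id)
{
        struct drm_i915_gem_context_destroy destroy = { .ctx_id = ctx_id };

        ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}
#endif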
Chris Wilson | c9dc0f3 | 2014-12-24 08:13:40 -0800 | [diff] [blame] | 910 | |
| 911 | int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, |
| 912 | struct drm_file *file) |
| 913 | { |
| 914 | struct drm_i915_file_private *file_priv = file->driver_priv; |
| 915 | struct drm_i915_gem_context_param *args = data; |
| 916 | struct intel_context *ctx; |
| 917 | int ret; |
| 918 | |
| 919 | ret = i915_mutex_lock_interruptible(dev); |
| 920 | if (ret) |
| 921 | return ret; |
| 922 | |
| 923 | ctx = i915_gem_context_get(file_priv, args->ctx_id); |
| 924 | if (IS_ERR(ctx)) { |
| 925 | mutex_unlock(&dev->struct_mutex); |
| 926 | return PTR_ERR(ctx); |
| 927 | } |
| 928 | |
| 929 | args->size = 0; |
| 930 | switch (args->param) { |
| 931 | case I915_CONTEXT_PARAM_BAN_PERIOD: |
| 932 | args->value = ctx->hang_stats.ban_period_seconds; |
| 933 | break; |
David Weinehall | b1b3827 | 2015-05-20 17:00:13 +0300 | [diff] [blame] | 934 | case I915_CONTEXT_PARAM_NO_ZEROMAP: |
| 935 | args->value = ctx->flags & CONTEXT_NO_ZEROMAP; |
| 936 | break; |
Chris Wilson | fa8848f | 2015-10-14 14:17:11 +0100 | [diff] [blame] | 937 | case I915_CONTEXT_PARAM_GTT_SIZE: |
| 938 | if (ctx->ppgtt) |
| 939 | args->value = ctx->ppgtt->base.total; |
| 940 | else if (to_i915(dev)->mm.aliasing_ppgtt) |
| 941 | args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total; |
| 942 | else |
| 943 | args->value = to_i915(dev)->gtt.base.total; |
| 944 | break; |
Chris Wilson | c9dc0f3 | 2014-12-24 08:13:40 -0800 | [diff] [blame] | 945 | default: |
| 946 | ret = -EINVAL; |
| 947 | break; |
| 948 | } |
| 949 | mutex_unlock(&dev->struct_mutex); |
| 950 | |
| 951 | return ret; |
| 952 | } |
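
/*
 * Illustrative sketch (not part of the original file): querying the total
 * GTT size visible to a context, matching the GETPARAM switch above.  For
 * these scalar parameters the kernel reports size == 0.
 */
#if 0
static __u64 context_gtt_size(int fd, __u32 ctx_id)
{
        struct drm_i915_gem_context_param p = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_GTT_SIZE,
        };

        ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
        return p.value; /* full PPGTT, aliasing PPGTT or global GTT total */
}
#endif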
| 953 | |
| 954 | int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, |
| 955 | struct drm_file *file) |
| 956 | { |
| 957 | struct drm_i915_file_private *file_priv = file->driver_priv; |
| 958 | struct drm_i915_gem_context_param *args = data; |
| 959 | struct intel_context *ctx; |
| 960 | int ret; |
| 961 | |
| 962 | ret = i915_mutex_lock_interruptible(dev); |
| 963 | if (ret) |
| 964 | return ret; |
| 965 | |
| 966 | ctx = i915_gem_context_get(file_priv, args->ctx_id); |
| 967 | if (IS_ERR(ctx)) { |
| 968 | mutex_unlock(&dev->struct_mutex); |
| 969 | return PTR_ERR(ctx); |
| 970 | } |
| 971 | |
| 972 | switch (args->param) { |
| 973 | case I915_CONTEXT_PARAM_BAN_PERIOD: |
| 974 | if (args->size) |
| 975 | ret = -EINVAL; |
| 976 | else if (args->value < ctx->hang_stats.ban_period_seconds && |
| 977 | !capable(CAP_SYS_ADMIN)) |
| 978 | ret = -EPERM; |
| 979 | else |
| 980 | ctx->hang_stats.ban_period_seconds = args->value; |
| 981 | break; |
David Weinehall | b1b3827 | 2015-05-20 17:00:13 +0300 | [diff] [blame] | 982 | case I915_CONTEXT_PARAM_NO_ZEROMAP: |
| 983 | if (args->size) { |
| 984 | ret = -EINVAL; |
| 985 | } else { |
| 986 | ctx->flags &= ~CONTEXT_NO_ZEROMAP; |
| 987 | ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0; |
| 988 | } |
| 989 | break; |
Chris Wilson | c9dc0f3 | 2014-12-24 08:13:40 -0800 | [diff] [blame] | 990 | default: |
| 991 | ret = -EINVAL; |
| 992 | break; |
| 993 | } |
| 994 | mutex_unlock(&dev->struct_mutex); |
| 995 | |
| 996 | return ret; |
| 997 | } |
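
/*
 * Illustrative sketch (not part of the original file): setting a parameter
 * via SETPARAM.  size must be left at 0 for these scalar parameters or the
 * kernel returns -EINVAL, and lowering the ban period below its current
 * value requires CAP_SYS_ADMIN.
 */
#if 0
static int set_no_zeromap(int fd, __u32 ctx_id)
{
        struct drm_i915_gem_context_param p = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_NO_ZEROMAP,
                .value = 1,     /* never place objects at offset 0 in this ctx */
        };

        return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
#endif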