/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except on
 * gen5). Though something like a context does exist for the media ring, the
 * code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context in order to invoke a save of the context we actually
 * care about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created at the request of a GPU client. These
 * contexts store GPU state, and thus allow GPU clients to avoid re-emitting
 * state (and potentially to query certain state) at any time. The kernel
 * driver makes certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the GTT
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and
 * is on the active list waiting for the next context switch to occur. Until
 * this happens, the object must remain at the same GTT offset. It is
 * therefore possible to destroy a context that is still active.
 */
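/*
 * An illustrative sketch of how a userspace client drives the lifecycle
 * described above. It is not taken from any particular client: it simply
 * shows the create ioctl, an execbuffer tagged with the returned handle, and
 * the destroy ioctl (error handling and the buffer/relocation setup are
 * elided; drmIoctl() is the libdrm wrapper). See include/uapi/drm/i915_drm.h
 * for the authoritative definitions.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = { ... };
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);	// S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);	// S1->S2
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */
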
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

static void i915_gem_context_clean(struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct i915_vma *vma, *next;

	if (!ppgtt)
		return;

	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 mm_list) {
		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
			break;
	}
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	/*
	 * This context is going away and we need to remove all VMAs still
	 * around. This is to handle imported shared objects for which the
	 * destructor did not run when their handles were closed.
	 */
	i915_gem_context_clean(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as for the idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (i915.enable_execlists) {
		struct intel_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			intel_lr_context_reset(dev, ctx);
		}

		return;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *lctx = ring->last_context;

		if (lctx) {
			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

			i915_gem_context_unreference(lctx);
			ring->last_context = NULL;
		}

		/* Force the GPU state to be reinitialised on enabling */
		if (ring->default_context)
			ring->default_context->legacy_hw_ctx.initialized = false;
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 i915.enable_execlists ? "LR" :
			 dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When the default context is created and switched to, the base object
		 * refcount will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to the default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (i915.enable_execlists) {
		if (ring->init_context == NULL)
			return 0;

		ret = ring->init_context(req);
	} else
		ret = i915_switch_context(req);

	if (ret) {
		DRM_ERROR("ring init context: %d\n", ret);
		return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

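/*
 * Emit an MI_SET_CONTEXT on the render ring, asking the hardware to save the
 * currently loaded context image and load the one backing req->ctx. On gen7+
 * the command is bracketed by the MI_ARB_ON_OFF and PSMI sleep-message
 * workarounds emitted below.
 */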
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = hw_flags | MI_MM_SPACE_GTT;
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915_semaphore_is_enabled(ring->dev) ?
		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
		0;
	int len, i, ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	/* These flags are for resource streamer on HSW+ */
	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
	else if (INTEL_INFO(ring->dev)->gen < 8)
		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

	len = 4;
	if (INTEL_INFO(ring->dev)->gen >= 7)
		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7) {
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
			flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;

			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
			for_each_ring(signaller, to_i915(ring->dev), i) {
				if (signaller == ring)
					continue;

				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
			}
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	}

	intel_ring_advance(ring);

	return ret;
}

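/*
 * A switch can be skipped entirely when the ring is already running the
 * target context, no L3 remap is pending, and the context's PPGTT page
 * directories are not marked dirty for this ring.
 */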
static inline bool should_skip_switch(struct intel_engine_cs *ring,
				      struct intel_context *from,
				      struct intel_context *to)
{
	if (to->remap_slice)
		return false;

	if (to->ppgtt && from == to &&
	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
		return true;

	return false;
}

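/*
 * Before gen8, and on the non-render rings of gen8, a context that uses a
 * full PPGTT needs its page directory registers loaded (PP_DCLV/PP_DIR_BASE
 * via LRI) before the context is submitted.
 */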
static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (INTEL_INFO(ring->dev)->gen < 8)
		return true;

	if (ring != &dev_priv->ring[RCS])
		return true;

	return false;
}

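/*
 * On the gen8 render ring, the page directories must instead be loaded after
 * MI_SET_CONTEXT when the restore was inhibited, because the inhibited
 * context image carries no valid page directory state.
 */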
static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
		   u32 hw_flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!to->ppgtt)
		return false;

	if (!IS_GEN8(ring->dev))
		return false;

	if (ring != &dev_priv->ring[RCS])
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

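/*
 * Legacy (ringbuffer submission) context switch: pin the target context image
 * in the GGTT, load the PPGTT page directories where required, emit
 * MI_SET_CONTEXT, perform any pending L3 remaps, and finally move the old
 * context's image to the active list and drop the reference it held while it
 * was current.
 */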
static int do_switch(struct drm_i915_gem_request *req)
{
	struct intel_context *to = req->ctx;
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (should_skip_switch(ring, from, to))
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (needs_pd_load_pre(ring, to)) {
		/* Older GENs and non-render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context." */
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		if (ret)
			goto unpin_out;

		/* Doing a PD load always reloads the page dirs */
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
		hw_flags |= MI_RESTORE_INHIBIT;
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occurs in this case. */
	} else if (to->ppgtt &&
		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
		hw_flags |= MI_FORCE_RESTORE;
		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
	}

	/* We should never emit switch_mm more than once */
	WARN_ON(needs_pd_load_pre(ring, to) &&
		needs_pd_load_post(ring, to, hw_flags));

	ret = mi_set_context(req, hw_flags);
	if (ret)
		goto unpin_out;

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ring, to, hw_flags)) {
		trace_switch_mm(ring, to);
		ret = to->ppgtt->switch_mm(to->ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret) {
			DRM_ERROR("Failed to change address space on context switch\n");
			goto unpin_out;
		}
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(req, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is not done with until after
	 * switching to the *next* context. Therefore we cannot retire the
	 * previous context until the next context has already started running.
	 * In fact, the below code is a bit suboptimal because the retiring
	 * could occur simply after the MI_SET_CONTEXT instead of when the next
	 * seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
		/* As long as MI_SET_CONTEXT is serializing, i.e. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		if (ring->init_context) {
			ret = ring->init_context(req);
			if (ret)
				DRM_ERROR("ring init context: %d\n", ret);
		}
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(i915.enable_execlists);
	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (req->ctx != ring->last_context) {
			i915_gem_context_reference(req->ctx);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = req->ctx;
		}
		return 0;
	}

	return do_switch(req);
}

static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

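/*
 * Context parameters are read and written through the GETPARAM/SETPARAM
 * ioctls below. A rough sketch of the userspace side of a query (error
 * handling elided; drmIoctl() is the libdrm wrapper):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	// p.value now holds the size of the context's address space
 */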
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		args->value = ctx->hang_stats.ban_period_seconds;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->gtt.base.total;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct intel_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		if (args->size)
			ret = -EINVAL;
		else if (args->value < ctx->hang_stats.ban_period_seconds &&
			 !capable(CAP_SYS_ADMIN))
			ret = -EPERM;
		else
			ctx->hang_stats.ban_period_seconds = args->value;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}