/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

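/*
 * Write the guest PDP root pointers into the shadow ring context. The
 * register pairs in the context image start at pdp3_UDW, so the pdp[]
 * array is copied in reverse order.
 */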
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

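/*
 * Populate the shadow context from the guest's logical ring context: the
 * per-engine context pages are read from guest memory through their GPAs,
 * selected registers (ctx_ctrl, ctx_timestamp and, for RCS, the indirect
 * context registers) plus the rest of the ring-context page are copied
 * over, and the PDP root pointers are replaced with the shadow page table.
 */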
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
			workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}

static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}

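/*
 * Context status notifier callback, invoked by i915 on context
 * schedule-in/out events. Host-owned (non-GVT) requests only switch the
 * engine MMIO state back from a vGPU to the host; GVT requests switch the
 * MMIO state to the owning vGPU on schedule-in and track shadow context
 * activity so that workload completion can wait for the context switch.
 */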
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;

	if (!is_gvt_request(req)) {
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_bh(&scheduler->mmio_context_lock);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_bh(&scheduler->mmio_context_lock);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

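/*
 * Refresh the low 12 bits of the cached context descriptor from
 * ctx->desc_template, so that flags such as the addressing mode copied
 * from the guest take effect for the shadow context.
 */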
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ctx->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}

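/*
 * Emit the scanned guest ring buffer contents into the i915 request:
 * reserve rb_len bytes with intel_ring_begin(), copy the shadowed commands
 * there and advance the ring.
 */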
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	void *shadow_ring_buffer_va;
	u32 *cs;

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
 * shadowing it as well, including the ring buffer, wa_ctx and context.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_ring *ring;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadowed)
		return 0;

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(shadow_ctx,
					dev_priv->engine[ring_id]);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_scan;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_scan;
	}

	/* Pin the shadow context by gvt even though the shadow context will
	 * be pinned when i915 allocates the request. That is because gvt will
	 * update the guest context from the shadow context when the workload
	 * is completed, and at that moment i915 may already have unpinned the
	 * shadow context, making the shadow_ctx pages invalid. So gvt needs
	 * to pin it by itself. After the guest context is updated, gvt can
	 * unpin the shadow_ctx safely.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		goto err_shadow;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto err_unpin;

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto err_unpin;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);
	ret = copy_workload_to_ring_buffer(workload);
	if (ret)
		goto err_unpin;
	workload->shadowed = true;
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
	return ret;
}

static int prepare_workload(struct intel_vgpu_workload *workload)
{
	int ret = 0;

	if (workload->prepare)
		ret = workload->prepare(workload);

	return ret;
}

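/*
 * Shadow the workload if that has not been done yet, run its prepare()
 * callback and, on success, submit the request to i915. All of this runs
 * under struct_mutex; a failure is recorded in workload->status.
 */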
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	int ret = 0;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = prepare_workload(workload);
	if (ret) {
		engine->context_unpin(engine, shadow_ctx);
		goto out;
	}

out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
			ring_id, workload->req);
		i915_add_request(workload->req);
		workload->dispatched = true;
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have a current workload, maybe the workload dispatcher
	 * failed to submit it for some reason, resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload
	 * once the current workload is set, the schedule policy routines
	 * will wait until the current workload is finished when trying to
	 * schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

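/*
 * Write the shadow context back to the guest after the workload completes:
 * copy the context pages into guest memory, update the ring header with
 * the workload's rb_tail and copy back ctx_ctrl/ctx_timestamp plus the
 * rest of the ring-context page.
 */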
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

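/*
 * Retire the current workload on a ring: wait for the shadow context to be
 * scheduled out, translate the request result into workload->status, write
 * the context back to the guest (unless the vGPU is resetting this engine),
 * unpin the shadow context and hand the workload back to its vGPU.
 */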
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	int event;

	mutex_lock(&gvt->lock);

	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, directly complete the workload.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO as the workload status so that
		 * a request which caused a GPU hang does not trigger a
		 * context switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, s->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

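/*
 * Per-ring worker thread: sleeps until pick_next_workload() returns a
 * workload, dispatches it under gvt->lock, waits for the request to
 * complete and then completes the workload. Skylake/Kabylake additionally
 * hold forcewake around dispatch and completion.
 */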
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
		if (ret && (vgpu_is_vm_unhealthy(ret))) {
			mutex_lock(&gvt->lock);
			intel_vgpu_clean_execlist(vgpu);
			mutex_unlock(&gvt->lock);
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		}

	}
	return 0;
}

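/**
 * intel_gvt_wait_vgpu_idle - wait until a vGPU has no running workload
 * @vgpu: a vGPU
 *
 * This function blocks on the workload completion waitqueue until the
 * vGPU's running workload count drops to zero.
 */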
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
				&engine->context_status_notifier,
				&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

/**
 * intel_vgpu_clean_submission - free submission-related resources for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	i915_gem_context_put(s->shadow_ctx);
	kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_setup_submission - set up submission-related resources for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	int ret;

	s->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(s->shadow_ctx))
		return PTR_ERR(s->shadow_ctx);

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
			sizeof(struct intel_vgpu_workload), 0,
			SLAB_HWCACHE_ALIGN,
			NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		INIT_LIST_HEAD(&s->workload_q_head[i]);

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	return 0;

out_shadow_ctx:
	i915_gem_context_put(s->shadow_ctx);
	return ret;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: the vGPU workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	if (workload->shadow_mm)
		intel_gvt_mm_unreference(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->shadowed = false;
	workload->vgpu = vgpu;

	return workload;
}