/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

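/* Copy the guest's PDP root pointers into the shadow ring context. The
 * pdp3_UDW..pdp0_LDW MMIO pairs live highest-first in the context image,
 * hence the reversed pdp[7 - i] indexing below.
 */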
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}

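/* Copy the guest ring context into the shadow context: the context pages
 * beyond the ring state page are copied verbatim through the hypervisor,
 * while selected ring-state registers and the PDP root pointers in the
 * state page are patched in from the shadowed values.
 */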
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EFAULT;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                I915_GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

        set_context_pdp_root_pointer(shadow_ring_context,
                        (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}

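/* Requests submitted through the GVT shadow context are marked with
 * force-single-submission; use that mark to tell them apart from
 * requests issued by the host i915 itself.
 */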
static inline bool is_gvt_request(struct i915_request *req)
{
        return i915_gem_context_force_single_submission(req->ctx);
}

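/* Snapshot INSTDONE and ACTHD from the HW into the vGPU's virtual
 * registers, so the guest still sees meaningful engine state for its
 * own workload after it is scheduled out or preempted.
 */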
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
        i915_reg_t reg;

        reg = RING_INSTDONE(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
        reg = RING_ACTHD(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
        reg = RING_ACTHD_UDW(ring_base);
        vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

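/* Context status notifier, called by i915 on context schedule-in,
 * schedule-out and preemption events. It switches the engine MMIO state
 * between host and vGPU owners and tracks whether the shadow context is
 * currently active on the HW.
 */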
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct i915_request *req = data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
        unsigned long flags;

        if (!is_gvt_request(req)) {
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

                return NOTIFY_OK;
        }

        workload = scheduler->current_workload[ring_id];
        if (unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              workload->vgpu, ring_id);
                        scheduler->engine_owner[ring_id] = workload->vgpu;
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                save_ring_hw_state(workload->vgpu, ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                save_ring_hw_state(workload->vgpu, ring_id);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

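/* Refresh the low 12 bits of the cached context descriptor (the
 * GEN8_CTX_* flags) from the context's desc_template, which GVT
 * rewrites per workload for the guest's addressing mode.
 */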
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
                struct intel_engine_cs *engine)
{
        struct intel_context *ce = &ctx->engine[engine->id];
        u64 desc = 0;

        desc = ce->lrc_desc;

        /* Update bits 0-11 of the context descriptor which includes flags
         * like GEN8_CTX_* cached in desc_template
         */
        desc &= U64_MAX << 12;
        desc |= ctx->desc_template & ((1ULL << 12) - 1);

        ce->lrc_desc = desc;
}

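/* Copy the scanned guest ring buffer content into space reserved in the
 * shadow context's ring via intel_ring_begin(), so the HW executes the
 * shadow copy instead of the guest's buffer.
 */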
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        void *shadow_ring_buffer_va;
        u32 *cs;
        struct i915_request *req = workload->req;

        if (IS_KABYLAKE(req->i915) &&
            is_inhibit_context(req->ctx, req->engine->id))
                intel_vgpu_restore_inhibit_context(vgpu, req);

        /* allocate shadow ring buffer */
        cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
        if (IS_ERR(cs)) {
                gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
                        workload->rb_len);
                return PTR_ERR(cs);
        }

        shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

        /* get shadow ring buffer va */
        workload->shadow_ring_buffer_va = cs;

        memcpy(cs, shadow_ring_buffer_va,
                        workload->rb_len);

        cs += workload->rb_len / sizeof(u32);
        intel_ring_advance(workload->req, cs);

        return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        if (!wa_ctx->indirect_ctx.obj)
                return;

        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ring_id = workload->ring_id;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct intel_ring *ring;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (workload->shadowed)
                return 0;

        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                     GEN8_CTX_ADDRESSING_MODE_SHIFT;

        if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
                shadow_context_descriptor_update(shadow_ctx,
                                        dev_priv->engine[ring_id]);

        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
                goto err_scan;

        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto err_scan;
        }

        /* Pin the shadow context by GVT even though the shadow context will
         * be pinned when i915 allocates a request. That is because GVT will
         * update the guest context from the shadow context when the workload
         * is completed, and at that moment i915 may already have unpinned the
         * shadow context, making the shadow_ctx pages invalid. So GVT needs
         * to pin it by itself. After updating the guest context, GVT can
         * unpin the shadow_ctx safely.
         */
        ring = engine->context_pin(engine, shadow_ctx);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
                gvt_vgpu_err("fail to pin shadow context\n");
                goto err_shadow;
        }

        ret = populate_shadow_context(workload);
        if (ret)
                goto err_unpin;
        workload->shadowed = true;
        return 0;

err_unpin:
        engine->context_unpin(engine, shadow_ctx);
err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
        return ret;
}

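/* Allocate an i915 request on the shadow context and copy the workload's
 * ring buffer into it; on failure the shadow context pin and wa_ctx taken
 * by the scan-and-shadow step are released.
 */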
static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct i915_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        int ret;

        rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto err_unpin;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_request_get(rq);
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
        return 0;

err_unpin:
        engine->context_unpin(engine, shadow_ctx);
        release_shadow_wa_ctx(&workload->wa_ctx);
        return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

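/* Pin every shadow batch buffer of the workload into the GGTT and patch
 * the shadowed MI_BATCH_BUFFER_START commands with the new addresses.
 */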
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_gvt *gvt = workload->vgpu->gvt;
        const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
        struct intel_vgpu_shadow_bb *bb;
        int ret;

        list_for_each_entry(bb, &workload->shadow_bb, list) {
                bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
                if (IS_ERR(bb->vma)) {
                        ret = PTR_ERR(bb->vma);
                        goto err;
                }

                /* relocate shadow batch buffer */
                bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
                if (gmadr_bytes == 8)
                        bb->bb_start_cmd_va[2] = 0;

                /* No one is going to touch shadow bb from now on. */
                if (bb->clflush & CLFLUSH_AFTER) {
                        drm_clflush_virt_range(bb->va, bb->obj->base.size);
                        bb->clflush &= ~CLFLUSH_AFTER;
                }

                ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
                if (ret)
                        goto err;

                i915_gem_obj_finish_shmem_access(bb->obj);
                bb->accessing = false;

                i915_vma_move_to_active(bb->vma, workload->req, 0);
        }
        return 0;
err:
        release_shadow_batch_buffer(workload);
        return ret;
}

static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct intel_vgpu_workload *workload = container_of(wa_ctx,
                                        struct intel_vgpu_workload,
                                        wa_ctx);
        int ring_id = workload->ring_id;
        struct intel_vgpu_submission *s = &workload->vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap_atomic(page);

        shadow_ring_context->bb_per_ctx_ptr.val =
                (shadow_ring_context->bb_per_ctx_ptr.val &
                (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
        shadow_ring_context->rcs_indirect_ctx.val =
                (shadow_ring_context->rcs_indirect_ctx.val &
                (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

        kunmap_atomic(shadow_ring_context);
        return 0;
}

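/* Pin the shadow indirect context image, record its GGTT address, and
 * propagate the shadowed per-ctx and indirect-ctx addresses into the
 * shadow ring context.
 */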
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct i915_vma *vma;
        unsigned char *per_ctx_va =
                (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
                wa_ctx->indirect_ctx.size;

        if (wa_ctx->indirect_ctx.size == 0)
                return 0;

        vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
                                       0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        /* FIXME: we are not tracking our pinned VMA leaving it
         * up to the core to fix up the stray pin_count upon
         * free.
         */

        wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

        wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
        memset(per_ctx_va, 0, CACHELINE_BYTES);

        update_wa_ctx_2_shadow_ctx(wa_ctx);
        return 0;
}

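/* Tear down all shadow batch buffers of a workload: finish any pending
 * shmem access, unpin mappings and VMAs, and free the objects and the
 * tracking structures.
 */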
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_shadow_bb *bb, *pos;

        if (list_empty(&workload->shadow_bb))
                return;

        bb = list_first_entry(&workload->shadow_bb,
                        struct intel_vgpu_shadow_bb, list);

        mutex_lock(&dev_priv->drm.struct_mutex);

        list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
                if (bb->obj) {
                        if (bb->accessing)
                                i915_gem_obj_finish_shmem_access(bb->obj);

                        if (bb->va && !IS_ERR(bb->va))
                                i915_gem_object_unpin_map(bb->obj);

                        if (bb->vma && !IS_ERR(bb->vma)) {
                                i915_vma_unpin(bb->vma);
                                i915_vma_close(bb->vma);
                        }
                        __i915_gem_object_release_unless_active(bb->obj);
                }
                list_del(&bb->list);
                kfree(bb);
        }

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

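/* Final pre-submission step: pin the shadow page tables, flush pending
 * PPGTT shadowing work, generate the i915 request, and pin the shadow
 * batch buffers and wa_ctx, unwinding everything on failure.
 */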
static int prepare_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        int ret = 0;

        ret = intel_vgpu_pin_mm(workload->shadow_mm);
        if (ret) {
                gvt_vgpu_err("fail to vgpu pin mm\n");
                return ret;
        }

        ret = intel_vgpu_sync_oos_pages(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to vgpu sync oos pages\n");
                goto err_unpin_mm;
        }

        ret = intel_vgpu_flush_post_shadow(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to flush post shadow\n");
                goto err_unpin_mm;
        }

        ret = intel_gvt_generate_request(workload);
        if (ret) {
                gvt_vgpu_err("fail to generate request\n");
                goto err_unpin_mm;
        }

        ret = prepare_shadow_batch_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
                goto err_unpin_mm;
        }

        ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
                goto err_shadow_batch;
        }

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto err_shadow_wa_ctx;
        }

        return 0;
err_shadow_wa_ctx:
        release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
        release_shadow_batch_buffer(workload);
err_unpin_mm:
        intel_vgpu_unpin_mm(workload->shadow_mm);
        return ret;
}

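/* Shadow and prepare a workload, then hand it to i915 by adding the
 * request. Called from the workload thread with gvt->lock held.
 */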
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ring_id = workload->ring_id;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        int ret = 0;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

        mutex_lock(&dev_priv->drm.struct_mutex);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        ret = prepare_workload(workload);
        if (ret) {
                engine->context_unpin(engine, shadow_ctx);
                goto out;
        }

out:
        if (ret)
                workload->status = ret;

        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                        ring_id, workload->req);
                i915_request_add(workload->req);
                workload->dispatched = true;
        }

        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

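/* Pick the next workload for a ring from the vGPU currently owning the
 * scheduler, or return NULL if the ring should stall (no current vGPU,
 * a reschedule is pending, or the queue is empty).
 */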
static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;

        /*
         * still have current workload, maybe the workload dispatcher
         * failed to submit it for some reason, resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * pick a workload as current workload
         * once current workload is set, schedule policy routines
         * will wait until the current workload is finished when trying
         * to schedule out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}

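/* Mirror of populate_shadow_context for the completion path: write the
 * shadow context image, ring tail and selected ring-state registers back
 * into the guest's context through the hypervisor.
 */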
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        int ring_id = workload->ring_id;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                        I915_GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                I915_GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
        struct intel_vgpu_workload *pos, *n;
        unsigned int tmp;

        /* free the unsubmitted workloads in the queues. */
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
                list_for_each_entry_safe(pos, n,
                        &s->workload_q_head[engine->id], list) {
                        list_del_init(&pos->list);
                        intel_vgpu_destroy_workload(pos);
                }
                clear_bit(engine->id, s->shadow_ctx_desc_updated);
        }
}

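/* Retire the current workload on a ring: wait for the shadow context to
 * be switched out, propagate the completion status and context image to
 * the guest, release shadow resources, and kick the scheduler if a
 * reschedule was requested.
 */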
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[ring_id];
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
        int event;

        mutex_lock(&gvt->lock);

        /* For a workload with a request, we need to wait for the context
         * switch to make sure the request is completed.
         * For a workload without a request, directly complete the workload.
         */
        if (workload->req) {
                struct drm_i915_private *dev_priv =
                        workload->vgpu->gvt->dev_priv;
                struct intel_engine_cs *engine =
                        dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                /* If this request caused a GPU hang, req->fence.error will
                 * be set to -EIO. Use -EIO as the workload status so that
                 * no context switch interrupt is injected into the guest
                 * for a request that caused a GPU hang.
                 */
                if (likely(workload->status == -EINPROGRESS)) {
                        if (workload->req->fence.error == -EIO)
                                workload->status = -EIO;
                        else
                                workload->status = 0;
                }

                i915_request_put(fetch_and_zero(&workload->req));

                if (!workload->status && !(vgpu->resetting_eng &
                                           ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
                mutex_lock(&dev_priv->drm.struct_mutex);
                /* unpin shadow ctx as the shadow_ctx update is done */
                engine->context_unpin(engine, s->shadow_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);

        if (!workload->status) {
                release_shadow_batch_buffer(workload);
                release_shadow_wa_ctx(&workload->wa_ctx);
        }

        if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
                /* If workload->status is not successful, it means the HW GPU
                 * hit a hang or something went wrong with i915/GVT, and GVT
                 * won't inject a context switch interrupt to the guest. So
                 * this error is actually a vGPU hang from the guest's point
                 * of view, and we should emulate a vGPU hang accordingly. If
                 * there are pending workloads which were already submitted
                 * from the guest, we should clean them up like the HW GPU
                 * does.
                 *
                 * If we are in the middle of an engine reset, the pending
                 * workloads won't be submitted to the HW GPU and will be
                 * cleaned up during the resetting process later, so doing
                 * the workload clean up here doesn't have any impact.
                 */
                clean_workloads(vgpu, ENGINE_MASK(ring_id));
        }

        workload->complete(workload);

        atomic_dec(&s->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);

        if (gvt->scheduler.need_reschedule)
                intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

        mutex_unlock(&gvt->lock);
}

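/* Per-ring kthread: each engine runs one workload_thread, which sleeps
 * until pick_next_workload() yields a workload, dispatches it, waits for
 * the request to complete, and then retires it.
 */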
struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
                        || IS_KABYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
                i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);
                if (ret && (vgpu_is_vm_unhealthy(ret)))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
        }
        return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&s->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                           !atomic_read(&s->running_workload_num));
        }
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;

        gvt_dbg_core("clean workload scheduler\n");

        for_each_engine(engine, gvt->dev_priv, i) {
                atomic_notifier_chain_unregister(
                                        &engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
                kthread_stop(scheduler->thread[i]);
        }
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }

                gvt->shadow_ctx_notifier_block[i].notifier_call =
                                        shadow_context_status_change;
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;

        intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
        i915_gem_context_put(s->shadow_ctx);
        kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
                unsigned long engine_mask)
{
        struct intel_vgpu_submission *s = &vgpu->submission;

        if (!s->active)
                return;

        clean_workloads(vgpu, engine_mask);
        s->ops->reset(vgpu, engine_mask);
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        enum intel_engine_id i;
        struct intel_engine_cs *engine;
        int ret;

        s->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(s->shadow_ctx))
                return PTR_ERR(s->shadow_ctx);

        if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
                s->shadow_ctx->priority = INT_MAX;

        bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

        s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                        sizeof(struct intel_vgpu_workload), 0,
                        SLAB_HWCACHE_ALIGN,
                        NULL);

        if (!s->workloads) {
                ret = -ENOMEM;
                goto out_shadow_ctx;
        }

        for_each_engine(engine, vgpu->gvt->dev_priv, i)
                INIT_LIST_HEAD(&s->workload_q_head[i]);

        atomic_set(&s->running_workload_num, 0);
        bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

        return 0;

out_shadow_ctx:
        i915_gem_context_put(s->shadow_ctx);
        return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be taken over by the new interface
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when guest configures submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
                                     unsigned long engine_mask,
                                     unsigned int interface)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        const struct intel_vgpu_submission_ops *ops[] = {
                [INTEL_VGPU_EXECLIST_SUBMISSION] =
                        &intel_vgpu_execlist_submission_ops,
        };
        int ret;

        if (WARN_ON(interface >= ARRAY_SIZE(ops)))
                return -EINVAL;

        if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
                return -EINVAL;

        if (s->active)
                s->ops->clean(vgpu, engine_mask);

        if (interface == 0) {
                s->ops = NULL;
                s->virtual_submission_interface = 0;
                s->active = false;
                gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
                return 0;
        }

        ret = ops[interface]->init(vgpu, engine_mask);
        if (ret)
                return ret;

        s->ops = ops[interface];
        s->virtual_submission_interface = interface;
        s->active = true;

        gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
                        vgpu->id, s->ops->name);

        return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: the vGPU workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu_submission *s = &workload->vgpu->submission;

        if (workload->shadow_mm)
                intel_vgpu_mm_put(workload->shadow_mm);

        kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_vgpu_workload *workload;

        workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
        if (!workload)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&workload->list);
        INIT_LIST_HEAD(&workload->shadow_bb);

        init_waitqueue_head(&workload->shadow_ctx_status_wq);
        atomic_set(&workload->shadow_ctx_active, 0);

        workload->status = -EINPROGRESS;
        workload->shadowed = false;
        workload->vgpu = vgpu;

        return workload;
}

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
                u64 ring_context_gpa, u32 pdp[8])
{
        u64 gpa;
        int i;

        gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

        for (i = 0; i < 8; i++)
                intel_gvt_hypervisor_read_gpa(vgpu,
                                gpa + i * 8, &pdp[7 - i], 4);
}

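/* Build (or look up) the shadow PPGTT mm for a workload from the guest
 * PDPs in its ring context, honoring the context's addressing mode
 * (legacy 32-bit 3-level vs. 64-bit 4-level).
 */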
static int prepare_mm(struct intel_vgpu_workload *workload)
{
        struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
        struct intel_vgpu_mm *mm;
        struct intel_vgpu *vgpu = workload->vgpu;
        intel_gvt_gtt_type_t root_entry_type;
        u64 pdps[GVT_RING_CTX_NR_PDPS];

        switch (desc->addressing_mode) {
        case 1: /* legacy 32-bit */
                root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
                break;
        case 3: /* legacy 64-bit */
                root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
                break;
        default:
                gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                return -EINVAL;
        }

        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

        mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
        if (IS_ERR(mm))
                return PTR_ERR(mm);

        workload->shadow_mm = mm;
        return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))

#define get_last_workload(q) \
        (list_empty(q) ? NULL : container_of(q->prev, \
        struct intel_vgpu_workload, list))
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: the ring the workload targets
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                           struct execlist_ctx_descriptor_format *desc)
{
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct list_head *q = workload_q_head(vgpu, ring_id);
        struct intel_vgpu_workload *last_workload = get_last_workload(q);
        struct intel_vgpu_workload *workload = NULL;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u64 ring_context_gpa;
        u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
        int ret;

        ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                        (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
        if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
                gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
                return ERR_PTR(-EINVAL);
        }

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_header.val), &head, 4);

        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ring_tail.val), &tail, 4);

        head &= RB_HEAD_OFF_MASK;
        tail &= RB_TAIL_OFF_MASK;

        if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
                gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
                gvt_dbg_el("ctx head %x real head %lx\n", head,
                                last_workload->rb_tail);
                /*
                 * cannot use guest context head pointer here,
                 * as it might not be updated at this time
                 */
                head = last_workload->rb_tail;
        }

        gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

        /* record some ring buffer register values for scan and shadow */
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_start.val), &start, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

        workload = alloc_workload(vgpu);
        if (IS_ERR(workload))
                return workload;

        workload->ring_id = ring_id;
        workload->ctx_desc = *desc;
        workload->ring_context_gpa = ring_context_gpa;
        workload->rb_head = head;
        workload->rb_tail = tail;
        workload->rb_start = start;
        workload->rb_ctl = ctl;

        if (ring_id == RCS) {
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

                workload->wa_ctx.indirect_ctx.guest_gma =
                        indirect_ctx & INDIRECT_CTX_ADDR_MASK;
                workload->wa_ctx.indirect_ctx.size =
                        (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                        CACHELINE_BYTES;
                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
                workload->wa_ctx.per_ctx.valid = per_ctx & 1;
        }

        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
                        workload, ring_id, head, tail, start, ctl);

        ret = prepare_mm(workload);
        if (ret) {
                kmem_cache_free(s->workloads, workload);
                return ERR_PTR(ret);
        }

        /* Only scan and shadow the first workload in the queue
         * as there is only one pre-allocated buf-obj for shadow.
         */
        if (list_empty(workload_q_head(vgpu, ring_id))) {
                intel_runtime_pm_get(dev_priv);
                mutex_lock(&dev_priv->drm.struct_mutex);
                ret = intel_gvt_scan_and_shadow_workload(workload);
                mutex_unlock(&dev_priv->drm.struct_mutex);
                intel_runtime_pm_put(dev_priv);
        }

        if (ret && (vgpu_is_vm_unhealthy(ret))) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }

        return workload;
}

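/* A rough sketch of how the execlist emulation is expected to drive the
 * two entry points above and below (illustrative only, not a verbatim
 * caller):
 *
 *	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	// fill in workload->prepare / workload->complete callbacks
 *	intel_vgpu_queue_workload(workload);
 *
 * The per-ring workload thread then picks it up, shadows and dispatches
 * it, and calls workload->complete() when the request retires.
 */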
/**
 * intel_vgpu_queue_workload - queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
        list_add_tail(&workload->list,
                workload_q_head(workload->vgpu, workload->ring_id));
        intel_gvt_kick_schedule(workload->vgpu->gvt);
        wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}