Zhi Wange4734052016-05-01 07:42:16 -04001/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Zhi Wang <zhi.a.wang@intel.com>
25 *
26 * Contributors:
27 * Ping Gao <ping.a.gao@intel.com>
28 * Tina Zhang <tina.zhang@intel.com>
29 * Chanbin Du <changbin.du@intel.com>
30 * Min He <min.he@intel.com>
31 * Bing Niu <bing.niu@intel.com>
32 * Zhenyu Wang <zhenyuw@linux.intel.com>
33 *
34 */
35
Zhi Wange4734052016-05-01 07:42:16 -040036#include <linux/kthread.h>
37
Zhenyu Wangfeddf6e2016-10-20 17:15:03 +080038#include "i915_drv.h"
39#include "gvt.h"
40
Zhi Wange4734052016-05-01 07:42:16 -040041#define RING_CTX_OFF(x) \
42 offsetof(struct execlist_ring_context, x)
43
Du, Changbin999ccb42016-10-20 14:08:47 +080044static void set_context_pdp_root_pointer(
45 struct execlist_ring_context *ring_context,
Zhi Wange4734052016-05-01 07:42:16 -040046 u32 pdp[8])
47{
48 struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
49 int i;
50
51 for (i = 0; i < 8; i++)
52 pdp_pair[i].val = pdp[7 - i];
53}
54
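/* Write the shadow PPGTT PDP root pointers into the shadow ring context,
 * so that the pinned shadow page tables are what the HW will actually walk.
 */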
Zhi Wangb20c0d52018-02-07 18:12:15 +080055static void update_shadow_pdps(struct intel_vgpu_workload *workload)
56{
57 struct intel_vgpu *vgpu = workload->vgpu;
58 int ring_id = workload->ring_id;
59 struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
60 struct drm_i915_gem_object *ctx_obj =
61 shadow_ctx->engine[ring_id].state->obj;
62 struct execlist_ring_context *shadow_ring_context;
63 struct page *page;
64
65 if (WARN_ON(!workload->shadow_mm))
66 return;
67
68 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
69 return;
70
71 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
72 shadow_ring_context = kmap(page);
73 set_context_pdp_root_pointer(shadow_ring_context,
74 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
75 kunmap(page);
76}
77
Min Hefa3dd622018-03-02 10:00:25 +080078/*
 79 * When populating the shadow ctx from the guest, we should not override
 80 * OA-related registers, so that they are not overwritten by guest OA configs.
 81 * This makes it possible to capture OA data from the host for both host and guests.
82 */
83static void sr_oa_regs(struct intel_vgpu_workload *workload,
84 u32 *reg_state, bool save)
85{
86 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
87 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
88 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
89 int i = 0;
90 u32 flex_mmio[] = {
91 i915_mmio_reg_offset(EU_PERF_CNTL0),
92 i915_mmio_reg_offset(EU_PERF_CNTL1),
93 i915_mmio_reg_offset(EU_PERF_CNTL2),
94 i915_mmio_reg_offset(EU_PERF_CNTL3),
95 i915_mmio_reg_offset(EU_PERF_CNTL4),
96 i915_mmio_reg_offset(EU_PERF_CNTL5),
97 i915_mmio_reg_offset(EU_PERF_CNTL6),
98 };
99
Gustavo A. R. Silva41e7ccc2018-03-22 13:21:54 -0500100 if (workload->ring_id != RCS)
Min Hefa3dd622018-03-02 10:00:25 +0800101 return;
102
103 if (save) {
104 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
105
106 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
107 u32 state_offset = ctx_flexeu0 + i * 2;
108
109 workload->flex_mmio[i] = reg_state[state_offset + 1];
110 }
111 } else {
112 reg_state[ctx_oactxctrl] =
113 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
114 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
115
116 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
117 u32 state_offset = ctx_flexeu0 + i * 2;
118 u32 mmio = flex_mmio[i];
119
120 reg_state[state_offset] = mmio;
121 reg_state[state_offset + 1] = workload->flex_mmio[i];
122 }
123 }
124}
125
Zhi Wange4734052016-05-01 07:42:16 -0400126static int populate_shadow_context(struct intel_vgpu_workload *workload)
127{
128 struct intel_vgpu *vgpu = workload->vgpu;
129 struct intel_gvt *gvt = vgpu->gvt;
130 int ring_id = workload->ring_id;
Zhi Wang1406a142017-09-10 21:15:18 +0800131 struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
Zhi Wange4734052016-05-01 07:42:16 -0400132 struct drm_i915_gem_object *ctx_obj =
133 shadow_ctx->engine[ring_id].state->obj;
134 struct execlist_ring_context *shadow_ring_context;
135 struct page *page;
136 void *dst;
137 unsigned long context_gpa, context_page_num;
138 int i;
139
140 gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
141 workload->ctx_desc.lrca);
142
Joonas Lahtinen63ffbcd2017-04-28 10:53:36 +0300143 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
Zhi Wange4734052016-05-01 07:42:16 -0400144
145 context_page_num = context_page_num >> PAGE_SHIFT;
146
147 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
148 context_page_num = 19;
149
150 i = 2;
151
152 while (i < context_page_num) {
153 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
154 (u32)((workload->ctx_desc.lrca + i) <<
Zhi Wang9556e112017-10-10 13:51:32 +0800155 I915_GTT_PAGE_SHIFT));
Zhi Wange4734052016-05-01 07:42:16 -0400156 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
Tina Zhang695fbc02017-03-10 04:26:53 -0500157 gvt_vgpu_err("Invalid guest context descriptor\n");
fred gao5c568832017-09-20 05:36:47 +0800158 return -EFAULT;
Zhi Wange4734052016-05-01 07:42:16 -0400159 }
160
Michel Thierry0b29c752017-09-13 09:56:00 +0100161 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800162 dst = kmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400163 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
Zhi Wang9556e112017-10-10 13:51:32 +0800164 I915_GTT_PAGE_SIZE);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800165 kunmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400166 i++;
167 }
168
169 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800170 shadow_ring_context = kmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400171
Min Hefa3dd622018-03-02 10:00:25 +0800172 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
Zhi Wange4734052016-05-01 07:42:16 -0400173#define COPY_REG(name) \
174 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
175 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
Zhenyu Wangd8303072018-03-19 17:09:05 +0800176#define COPY_REG_MASKED(name) {\
177 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
178 + RING_CTX_OFF(name.val),\
179 &shadow_ring_context->name.val, 4);\
180 shadow_ring_context->name.val |= 0xffff << 16;\
181 }
Zhi Wange4734052016-05-01 07:42:16 -0400182
Zhenyu Wangd8303072018-03-19 17:09:05 +0800183 COPY_REG_MASKED(ctx_ctrl);
Zhi Wange4734052016-05-01 07:42:16 -0400184 COPY_REG(ctx_timestamp);
185
186 if (ring_id == RCS) {
187 COPY_REG(bb_per_ctx_ptr);
188 COPY_REG(rcs_indirect_ctx);
189 COPY_REG(rcs_indirect_ctx_offset);
190 }
191#undef COPY_REG
Zhenyu Wangd8303072018-03-19 17:09:05 +0800192#undef COPY_REG_MASKED
Zhi Wange4734052016-05-01 07:42:16 -0400193
Zhi Wange4734052016-05-01 07:42:16 -0400194 intel_gvt_hypervisor_read_gpa(vgpu,
195 workload->ring_context_gpa +
196 sizeof(*shadow_ring_context),
197 (void *)shadow_ring_context +
198 sizeof(*shadow_ring_context),
Zhi Wang9556e112017-10-10 13:51:32 +0800199 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
Zhi Wange4734052016-05-01 07:42:16 -0400200
Min Hefa3dd622018-03-02 10:00:25 +0800201 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800202 kunmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400203 return 0;
204}
205
Chris Wilsone61e0f52018-02-21 09:56:36 +0000206static inline bool is_gvt_request(struct i915_request *req)
Changbin Dubc2d4b62017-03-22 12:35:31 +0800207{
208 return i915_gem_context_force_single_submission(req->ctx);
209}
210
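/* Snapshot a few ring HW registers (INSTDONE, ACTHD) into the vGPU's virtual
 * register space when its context is scheduled out or preempted.
 */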
Xiong Zhang295764c2017-11-07 05:23:02 +0800211static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
212{
213 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
214 u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
215 i915_reg_t reg;
216
217 reg = RING_INSTDONE(ring_base);
218 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
219 reg = RING_ACTHD(ring_base);
220 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
221 reg = RING_ACTHD_UDW(ring_base);
222 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
223}
224
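/* Context status notifier: on schedule-in/out of a request, switch the engine
 * MMIO state between host and vGPU owners and track whether the shadow
 * context is currently active on the HW.
 */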
Zhi Wange4734052016-05-01 07:42:16 -0400225static int shadow_context_status_change(struct notifier_block *nb,
226 unsigned long action, void *data)
227{
Chris Wilsone61e0f52018-02-21 09:56:36 +0000228 struct i915_request *req = data;
Changbin Du3fc03062017-03-13 10:47:11 +0800229 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
230 shadow_ctx_notifier_block[req->engine->id]);
231 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
Changbin Du0e86cc92017-05-04 10:52:38 +0800232 enum intel_engine_id ring_id = req->engine->id;
233 struct intel_vgpu_workload *workload;
Changbin Du679fd3e2017-11-13 14:58:31 +0800234 unsigned long flags;
Zhi Wange4734052016-05-01 07:42:16 -0400235
Changbin Du0e86cc92017-05-04 10:52:38 +0800236 if (!is_gvt_request(req)) {
Changbin Du679fd3e2017-11-13 14:58:31 +0800237 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
Changbin Du0e86cc92017-05-04 10:52:38 +0800238 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
239 scheduler->engine_owner[ring_id]) {
240 /* Switch ring from vGPU to host. */
241 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
242 NULL, ring_id);
243 scheduler->engine_owner[ring_id] = NULL;
244 }
Changbin Du679fd3e2017-11-13 14:58:31 +0800245 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
Changbin Du0e86cc92017-05-04 10:52:38 +0800246
247 return NOTIFY_OK;
248 }
249
250 workload = scheduler->current_workload[ring_id];
251 if (unlikely(!workload))
Chuanxiao Dong9272f732017-02-17 19:29:52 +0800252 return NOTIFY_OK;
253
Zhi Wange4734052016-05-01 07:42:16 -0400254 switch (action) {
255 case INTEL_CONTEXT_SCHEDULE_IN:
Changbin Du679fd3e2017-11-13 14:58:31 +0800256 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
Changbin Du0e86cc92017-05-04 10:52:38 +0800257 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
258 /* Switch ring from host to vGPU or vGPU to vGPU. */
259 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
260 workload->vgpu, ring_id);
261 scheduler->engine_owner[ring_id] = workload->vgpu;
262 } else
263 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
264 ring_id, workload->vgpu->id);
Changbin Du679fd3e2017-11-13 14:58:31 +0800265 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
Zhi Wange4734052016-05-01 07:42:16 -0400266 atomic_set(&workload->shadow_ctx_active, 1);
267 break;
268 case INTEL_CONTEXT_SCHEDULE_OUT:
Xiong Zhang295764c2017-11-07 05:23:02 +0800269 save_ring_hw_state(workload->vgpu, ring_id);
Zhi Wange4734052016-05-01 07:42:16 -0400270 atomic_set(&workload->shadow_ctx_active, 0);
271 break;
Zhenyu Wangda5f99e2017-12-01 14:59:53 +0800272 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
273 save_ring_hw_state(workload->vgpu, ring_id);
274 break;
Zhi Wange4734052016-05-01 07:42:16 -0400275 default:
276 WARN_ON(1);
277 return NOTIFY_OK;
278 }
279 wake_up(&workload->shadow_ctx_status_wq);
280 return NOTIFY_OK;
281}
282
Kechen Lu9dfb8e52017-08-10 07:41:36 +0800283static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
284 struct intel_engine_cs *engine)
285{
286 struct intel_context *ce = &ctx->engine[engine->id];
287 u64 desc = 0;
288
289 desc = ce->lrc_desc;
290
 291	/* Update bits 0-11 of the context descriptor, which include flags
292 * like GEN8_CTX_* cached in desc_template
293 */
294 desc &= U64_MAX << 12;
295 desc |= ctx->desc_template & ((1ULL << 12) - 1);
296
297 ce->lrc_desc = desc;
298}
299
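/* Copy the scanned guest ring buffer contents into the space reserved in the
 * shadow (i915) ring buffer of this workload's request.
 */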
fred gao0a53bc02017-08-18 15:41:06 +0800300static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
301{
302 struct intel_vgpu *vgpu = workload->vgpu;
303 void *shadow_ring_buffer_va;
304 u32 *cs;
Weinan Licd7e61b2018-02-23 14:46:45 +0800305 struct i915_request *req = workload->req;
306
307 if (IS_KABYLAKE(req->i915) &&
308 is_inhibit_context(req->ctx, req->engine->id))
309 intel_vgpu_restore_inhibit_context(vgpu, req);
fred gao0a53bc02017-08-18 15:41:06 +0800310
311 /* allocate shadow ring buffer */
312 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
313 if (IS_ERR(cs)) {
314 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
315 workload->rb_len);
316 return PTR_ERR(cs);
317 }
318
319 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
320
321 /* get shadow ring buffer va */
322 workload->shadow_ring_buffer_va = cs;
323
324 memcpy(cs, shadow_ring_buffer_va,
325 workload->rb_len);
326
327 cs += workload->rb_len / sizeof(u32);
328 intel_ring_advance(workload->req, cs);
329
330 return 0;
331}
332
Chris Wilson7b302552017-11-20 13:29:58 +0000333static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
fred gaoa3cfdca2017-08-18 15:41:07 +0800334{
335 if (!wa_ctx->indirect_ctx.obj)
336 return;
337
338 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
339 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
340}
341
Ping Gao89ea20b2017-06-29 12:22:42 +0800342/**
343 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 344 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
345 * @workload: an abstract entity for each execlist submission.
346 *
 347 * This function is called before the workload is submitted to i915, to make
348 * sure the content of the workload is valid.
349 */
350int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
Zhi Wange4734052016-05-01 07:42:16 -0400351{
Zhi Wang1406a142017-09-10 21:15:18 +0800352 struct intel_vgpu *vgpu = workload->vgpu;
353 struct intel_vgpu_submission *s = &vgpu->submission;
354 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
355 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
Zhi Wange4734052016-05-01 07:42:16 -0400356 int ring_id = workload->ring_id;
fred gao0a53bc02017-08-18 15:41:06 +0800357 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
fred gao0a53bc02017-08-18 15:41:06 +0800358 struct intel_ring *ring;
Zhi Wange4734052016-05-01 07:42:16 -0400359 int ret;
360
Ping Gao87e919d2017-07-04 14:53:03 +0800361 lockdep_assert_held(&dev_priv->drm.struct_mutex);
362
Ping Gaod0302e72017-06-29 12:22:43 +0800363 if (workload->shadowed)
364 return 0;
Zhi Wange4734052016-05-01 07:42:16 -0400365
Zhenyu Wang03806ed2017-02-13 17:07:19 +0800366 shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
367 shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
Zhi Wange4734052016-05-01 07:42:16 -0400368 GEN8_CTX_ADDRESSING_MODE_SHIFT;
369
Zhi Wang1406a142017-09-10 21:15:18 +0800370 if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
Kechen Lu9dfb8e52017-08-10 07:41:36 +0800371 shadow_context_descriptor_update(shadow_ctx,
372 dev_priv->engine[ring_id]);
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800373
Ping Gao89ea20b2017-06-29 12:22:42 +0800374 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
Zhi Wangbe1da702016-05-03 18:26:57 -0400375 if (ret)
fred gaoa3cfdca2017-08-18 15:41:07 +0800376 goto err_scan;
Zhi Wangbe1da702016-05-03 18:26:57 -0400377
Tina Zhang17f1b1a2017-03-15 23:16:01 -0400378 if ((workload->ring_id == RCS) &&
379 (workload->wa_ctx.indirect_ctx.size != 0)) {
380 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
381 if (ret)
fred gaoa3cfdca2017-08-18 15:41:07 +0800382 goto err_scan;
Tina Zhang17f1b1a2017-03-15 23:16:01 -0400383 }
Zhi Wangbe1da702016-05-03 18:26:57 -0400384
Ping Gao89ea20b2017-06-29 12:22:42 +0800385	/* pin shadow context by gvt even though the shadow context will be pinned
 386	 * when i915 allocates the request. That is because gvt will update the guest
 387	 * context from the shadow context when the workload is completed, and at that
 388	 * moment, i915 may already have unpinned the shadow context, making the
 389	 * shadow_ctx pages invalid. So gvt needs to pin it itself. After updating
 390	 * the guest context, gvt can unpin the shadow_ctx safely.
391 */
392 ring = engine->context_pin(engine, shadow_ctx);
393 if (IS_ERR(ring)) {
394 ret = PTR_ERR(ring);
395 gvt_vgpu_err("fail to pin shadow context\n");
fred gaoa3cfdca2017-08-18 15:41:07 +0800396 goto err_shadow;
Ping Gao89ea20b2017-06-29 12:22:42 +0800397 }
Zhi Wange4734052016-05-01 07:42:16 -0400398
fred gao0a53bc02017-08-18 15:41:06 +0800399 ret = populate_shadow_context(workload);
400 if (ret)
fred gaoa3cfdca2017-08-18 15:41:07 +0800401 goto err_unpin;
fred gaof2880e02017-11-14 17:09:35 +0800402 workload->shadowed = true;
403 return 0;
404
405err_unpin:
406 engine->context_unpin(engine, shadow_ctx);
407err_shadow:
408 release_shadow_wa_ctx(&workload->wa_ctx);
409err_scan:
410 return ret;
411}
412
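/* Allocate an i915 request on the shadow context for this workload and copy
 * the shadowed ring buffer contents into it.
 */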
413static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
414{
415 int ring_id = workload->ring_id;
416 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
417 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
Chris Wilsone61e0f52018-02-21 09:56:36 +0000418 struct i915_request *rq;
fred gaof2880e02017-11-14 17:09:35 +0800419 struct intel_vgpu *vgpu = workload->vgpu;
420 struct intel_vgpu_submission *s = &vgpu->submission;
421 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
422 int ret;
fred gao0a53bc02017-08-18 15:41:06 +0800423
Chris Wilsone61e0f52018-02-21 09:56:36 +0000424 rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
fred gao0a53bc02017-08-18 15:41:06 +0800425 if (IS_ERR(rq)) {
426 gvt_vgpu_err("fail to allocate gem request\n");
427 ret = PTR_ERR(rq);
fred gaoa3cfdca2017-08-18 15:41:07 +0800428 goto err_unpin;
fred gao0a53bc02017-08-18 15:41:06 +0800429 }
430
431 gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
432
Chris Wilsone61e0f52018-02-21 09:56:36 +0000433 workload->req = i915_request_get(rq);
fred gao0a53bc02017-08-18 15:41:06 +0800434 ret = copy_workload_to_ring_buffer(workload);
435 if (ret)
fred gaoa3cfdca2017-08-18 15:41:07 +0800436 goto err_unpin;
fred gaoa3cfdca2017-08-18 15:41:07 +0800437 return 0;
fred gao0a53bc02017-08-18 15:41:06 +0800438
fred gaoa3cfdca2017-08-18 15:41:07 +0800439err_unpin:
440 engine->context_unpin(engine, shadow_ctx);
fred gaoa3cfdca2017-08-18 15:41:07 +0800441 release_shadow_wa_ctx(&workload->wa_ctx);
fred gao0a53bc02017-08-18 15:41:06 +0800442 return ret;
443}
444
Zhi Wangf52c3802017-09-24 21:53:03 +0800445static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
446
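/* Prepare every shadow batch buffer of the workload: pin the shadow BB into
 * GGTT and patch the batch buffer start address in the shadowed command (or
 * keep the original guest address for PPGTT BBs), flush CPU caches and mark
 * the BB as busy on the request.
 */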
Zhi Wangd8235b52017-09-12 22:06:39 +0800447static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
448{
449 struct intel_gvt *gvt = workload->vgpu->gvt;
450 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
Zhi Wangf52c3802017-09-24 21:53:03 +0800451 struct intel_vgpu_shadow_bb *bb;
452 int ret;
Zhi Wangd8235b52017-09-12 22:06:39 +0800453
Zhi Wangf52c3802017-09-24 21:53:03 +0800454 list_for_each_entry(bb, &workload->shadow_bb, list) {
fred gaoef75c682018-03-15 13:21:10 +0800455		/* For a privileged batch buffer, not wa_ctx, the bb_start_cmd_va
 456		 * is only updated into ring_scan_buffer, not the real ring address
 457		 * allocated later in copy_workload_to_ring_buffer. Note that
 458		 * shadow_ring_buffer_va points to the real ring buffer va
 459		 * in copy_workload_to_ring_buffer.
460 */
461
462 if (bb->bb_offset)
463 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
464 + bb->bb_offset;
465
Zhao Yan96bebe32018-04-04 13:57:09 +0800466 if (bb->ppgtt) {
 467			/* for a non-privileged bb, scan & shadow is only for
 468			 * debugging purposes, so the content of the shadow bb
 469			 * is the same as the original bb. Therefore,
 470			 * here, rather than switching to the shadow bb's gma
 471			 * address, we directly use the original batch buffer's
 472			 * gma address and send the original bb to hardware
 473			 * directly
474 */
475 if (bb->clflush & CLFLUSH_AFTER) {
476 drm_clflush_virt_range(bb->va,
477 bb->obj->base.size);
478 bb->clflush &= ~CLFLUSH_AFTER;
479 }
480 i915_gem_obj_finish_shmem_access(bb->obj);
481 bb->accessing = false;
Zhi Wangf52c3802017-09-24 21:53:03 +0800482
Zhao Yan96bebe32018-04-04 13:57:09 +0800483 } else {
484 bb->vma = i915_gem_object_ggtt_pin(bb->obj,
485 NULL, 0, 0, 0);
486 if (IS_ERR(bb->vma)) {
487 ret = PTR_ERR(bb->vma);
488 goto err;
489 }
490
491 /* relocate shadow batch buffer */
492 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
493 if (gmadr_bytes == 8)
494 bb->bb_start_cmd_va[2] = 0;
495
496 /* No one is going to touch shadow bb from now on. */
497 if (bb->clflush & CLFLUSH_AFTER) {
498 drm_clflush_virt_range(bb->va,
499 bb->obj->base.size);
500 bb->clflush &= ~CLFLUSH_AFTER;
501 }
502
503 ret = i915_gem_object_set_to_gtt_domain(bb->obj,
504 false);
505 if (ret)
506 goto err;
507
508 i915_gem_obj_finish_shmem_access(bb->obj);
509 bb->accessing = false;
510
511 i915_vma_move_to_active(bb->vma, workload->req, 0);
Zhi Wangf52c3802017-09-24 21:53:03 +0800512 }
Zhi Wangd8235b52017-09-12 22:06:39 +0800513 }
514 return 0;
Zhi Wangf52c3802017-09-24 21:53:03 +0800515err:
516 release_shadow_batch_buffer(workload);
517 return ret;
Zhi Wangd8235b52017-09-12 22:06:39 +0800518}
519
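/* Point bb_per_ctx_ptr and rcs_indirect_ctx in the shadow ring context at the
 * shadowed per-ctx and indirect-ctx workaround buffers.
 */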
520static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
521{
522 struct intel_vgpu_workload *workload = container_of(wa_ctx,
523 struct intel_vgpu_workload,
524 wa_ctx);
525 int ring_id = workload->ring_id;
526 struct intel_vgpu_submission *s = &workload->vgpu->submission;
527 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
528 struct drm_i915_gem_object *ctx_obj =
529 shadow_ctx->engine[ring_id].state->obj;
530 struct execlist_ring_context *shadow_ring_context;
531 struct page *page;
532
533 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
534 shadow_ring_context = kmap_atomic(page);
535
536 shadow_ring_context->bb_per_ctx_ptr.val =
537 (shadow_ring_context->bb_per_ctx_ptr.val &
538 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
539 shadow_ring_context->rcs_indirect_ctx.val =
540 (shadow_ring_context->rcs_indirect_ctx.val &
541 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
542
543 kunmap_atomic(shadow_ring_context);
544 return 0;
545}
546
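/* Pin the shadowed indirect context object into GGTT, record its graphics
 * memory address and update the shadow ring context accordingly.
 */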
547static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
548{
549 struct i915_vma *vma;
550 unsigned char *per_ctx_va =
551 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
552 wa_ctx->indirect_ctx.size;
553
554 if (wa_ctx->indirect_ctx.size == 0)
555 return 0;
556
557 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
558 0, CACHELINE_BYTES, 0);
559 if (IS_ERR(vma))
560 return PTR_ERR(vma);
561
562 /* FIXME: we are not tracking our pinned VMA leaving it
563 * up to the core to fix up the stray pin_count upon
564 * free.
565 */
566
567 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
568
569 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
570 memset(per_ctx_va, 0, CACHELINE_BYTES);
571
572 update_wa_ctx_2_shadow_ctx(wa_ctx);
573 return 0;
574}
575
576static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
577{
Zhi Wangf52c3802017-09-24 21:53:03 +0800578 struct intel_vgpu *vgpu = workload->vgpu;
579 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
580 struct intel_vgpu_shadow_bb *bb, *pos;
Zhi Wangd8235b52017-09-12 22:06:39 +0800581
Zhi Wangf52c3802017-09-24 21:53:03 +0800582 if (list_empty(&workload->shadow_bb))
583 return;
584
585 bb = list_first_entry(&workload->shadow_bb,
586 struct intel_vgpu_shadow_bb, list);
587
588 mutex_lock(&dev_priv->drm.struct_mutex);
589
590 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
591 if (bb->obj) {
592 if (bb->accessing)
593 i915_gem_obj_finish_shmem_access(bb->obj);
594
595 if (bb->va && !IS_ERR(bb->va))
596 i915_gem_object_unpin_map(bb->obj);
597
598 if (bb->vma && !IS_ERR(bb->vma)) {
599 i915_vma_unpin(bb->vma);
600 i915_vma_close(bb->vma);
601 }
602 __i915_gem_object_release_unless_active(bb->obj);
Zhi Wangd8235b52017-09-12 22:06:39 +0800603 }
Zhi Wangf52c3802017-09-24 21:53:03 +0800604 list_del(&bb->list);
605 kfree(bb);
Zhi Wangd8235b52017-09-12 22:06:39 +0800606 }
Zhi Wangf52c3802017-09-24 21:53:03 +0800607
608 mutex_unlock(&dev_priv->drm.struct_mutex);
Zhi Wangd8235b52017-09-12 22:06:39 +0800609}
610
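/* Prepare a workload for dispatch: pin the shadow PPGTT mm, update the shadow
 * PDPs, sync out-of-sync pages, flush post-shadowed entries, generate the
 * i915 request and prepare the shadow batch buffers and wa_ctx.
 */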
Zhi Wang497aa3f2017-09-12 21:51:10 +0800611static int prepare_workload(struct intel_vgpu_workload *workload)
612{
Zhi Wangd8235b52017-09-12 22:06:39 +0800613 struct intel_vgpu *vgpu = workload->vgpu;
Zhi Wang497aa3f2017-09-12 21:51:10 +0800614 int ret = 0;
615
Zhi Wangd8235b52017-09-12 22:06:39 +0800616 ret = intel_vgpu_pin_mm(workload->shadow_mm);
617 if (ret) {
618 gvt_vgpu_err("fail to vgpu pin mm\n");
619 return ret;
620 }
Zhi Wang497aa3f2017-09-12 21:51:10 +0800621
Zhi Wangb20c0d52018-02-07 18:12:15 +0800622 update_shadow_pdps(workload);
623
Zhi Wangd8235b52017-09-12 22:06:39 +0800624 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
625 if (ret) {
626 gvt_vgpu_err("fail to vgpu sync oos pages\n");
627 goto err_unpin_mm;
628 }
629
630 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
631 if (ret) {
632 gvt_vgpu_err("fail to flush post shadow\n");
633 goto err_unpin_mm;
634 }
635
fred gaof2880e02017-11-14 17:09:35 +0800636 ret = intel_gvt_generate_request(workload);
637 if (ret) {
638 gvt_vgpu_err("fail to generate request\n");
639 goto err_unpin_mm;
640 }
641
Zhi Wangd8235b52017-09-12 22:06:39 +0800642 ret = prepare_shadow_batch_buffer(workload);
643 if (ret) {
644 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
645 goto err_unpin_mm;
646 }
647
648 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
649 if (ret) {
650 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
651 goto err_shadow_batch;
652 }
653
654 if (workload->prepare) {
655 ret = workload->prepare(workload);
656 if (ret)
657 goto err_shadow_wa_ctx;
658 }
659
660 return 0;
661err_shadow_wa_ctx:
662 release_shadow_wa_ctx(&workload->wa_ctx);
663err_shadow_batch:
664 release_shadow_batch_buffer(workload);
665err_unpin_mm:
666 intel_vgpu_unpin_mm(workload->shadow_mm);
Zhi Wang497aa3f2017-09-12 21:51:10 +0800667 return ret;
668}
669
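/* Scan/shadow and prepare a workload, then submit its request to i915.
 * Any failure is recorded in workload->status.
 */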
fred gao0a53bc02017-08-18 15:41:06 +0800670static int dispatch_workload(struct intel_vgpu_workload *workload)
671{
Zhi Wang1406a142017-09-10 21:15:18 +0800672 struct intel_vgpu *vgpu = workload->vgpu;
673 struct intel_vgpu_submission *s = &vgpu->submission;
674 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
675 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
fred gao0a53bc02017-08-18 15:41:06 +0800676 int ring_id = workload->ring_id;
fred gao0a53bc02017-08-18 15:41:06 +0800677 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
678 int ret = 0;
679
680 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
681 ring_id, workload);
682
683 mutex_lock(&dev_priv->drm.struct_mutex);
684
685 ret = intel_gvt_scan_and_shadow_workload(workload);
686 if (ret)
687 goto out;
688
Zhi Wang497aa3f2017-09-12 21:51:10 +0800689 ret = prepare_workload(workload);
690 if (ret) {
691 engine->context_unpin(engine, shadow_ctx);
692 goto out;
fred gao0a53bc02017-08-18 15:41:06 +0800693 }
694
Pei Zhang90d27a12016-11-14 18:02:57 +0800695out:
696 if (ret)
697 workload->status = ret;
Chris Wilson0eb742d2016-10-20 17:29:36 +0800698
Ping Gao89ea20b2017-06-29 12:22:42 +0800699 if (!IS_ERR_OR_NULL(workload->req)) {
700 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
701 ring_id, workload->req);
Chris Wilsone61e0f52018-02-21 09:56:36 +0000702 i915_request_add(workload->req);
Ping Gao89ea20b2017-06-29 12:22:42 +0800703 workload->dispatched = true;
704 }
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800705
Pei Zhang90d27a12016-11-14 18:02:57 +0800706 mutex_unlock(&dev_priv->drm.struct_mutex);
Zhi Wange4734052016-05-01 07:42:16 -0400707 return ret;
708}
709
710static struct intel_vgpu_workload *pick_next_workload(
711 struct intel_gvt *gvt, int ring_id)
712{
713 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
714 struct intel_vgpu_workload *workload = NULL;
715
716 mutex_lock(&gvt->lock);
717
718 /*
719 * no current vgpu / will be scheduled out / no workload
720 * bail out
721 */
722 if (!scheduler->current_vgpu) {
723 gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
724 goto out;
725 }
726
727 if (scheduler->need_reschedule) {
728 gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
729 goto out;
730 }
731
Zhenyu Wang954180a2017-04-12 14:22:50 +0800732 if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
Zhi Wange4734052016-05-01 07:42:16 -0400733 goto out;
Zhi Wange4734052016-05-01 07:42:16 -0400734
735 /*
 736	 * still have current workload, maybe the workload dispatcher
 737	 * failed to submit it for some reason, resubmit it.
738 */
739 if (scheduler->current_workload[ring_id]) {
740 workload = scheduler->current_workload[ring_id];
741 gvt_dbg_sched("ring id %d still have current workload %p\n",
742 ring_id, workload);
743 goto out;
744 }
745
746 /*
747 * pick a workload as current workload
748 * once current workload is set, schedule policy routines
 749	 * will wait until the current workload is finished when trying to
750 * schedule out a vgpu.
751 */
752 scheduler->current_workload[ring_id] = container_of(
753 workload_q_head(scheduler->current_vgpu, ring_id)->next,
754 struct intel_vgpu_workload, list);
755
756 workload = scheduler->current_workload[ring_id];
757
758 gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
759
Zhi Wang1406a142017-09-10 21:15:18 +0800760 atomic_inc(&workload->vgpu->submission.running_workload_num);
Zhi Wange4734052016-05-01 07:42:16 -0400761out:
762 mutex_unlock(&gvt->lock);
763 return workload;
764}
765
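/* Copy the shadow context contents (ring context and the remaining context
 * pages) back into guest memory after the workload completes.
 */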
766static void update_guest_context(struct intel_vgpu_workload *workload)
767{
768 struct intel_vgpu *vgpu = workload->vgpu;
769 struct intel_gvt *gvt = vgpu->gvt;
Zhi Wang1406a142017-09-10 21:15:18 +0800770 struct intel_vgpu_submission *s = &vgpu->submission;
771 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
Zhi Wange4734052016-05-01 07:42:16 -0400772 int ring_id = workload->ring_id;
Zhi Wange4734052016-05-01 07:42:16 -0400773 struct drm_i915_gem_object *ctx_obj =
774 shadow_ctx->engine[ring_id].state->obj;
775 struct execlist_ring_context *shadow_ring_context;
776 struct page *page;
777 void *src;
778 unsigned long context_gpa, context_page_num;
779 int i;
780
781 gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
782 workload->ctx_desc.lrca);
783
Joonas Lahtinen63ffbcd2017-04-28 10:53:36 +0300784 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
Zhi Wange4734052016-05-01 07:42:16 -0400785
786 context_page_num = context_page_num >> PAGE_SHIFT;
787
788 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
789 context_page_num = 19;
790
791 i = 2;
792
793 while (i < context_page_num) {
794 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
795 (u32)((workload->ctx_desc.lrca + i) <<
Zhi Wang9556e112017-10-10 13:51:32 +0800796 I915_GTT_PAGE_SHIFT));
Zhi Wange4734052016-05-01 07:42:16 -0400797 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
Tina Zhang695fbc02017-03-10 04:26:53 -0500798 gvt_vgpu_err("invalid guest context descriptor\n");
Zhi Wange4734052016-05-01 07:42:16 -0400799 return;
800 }
801
Michel Thierry0b29c752017-09-13 09:56:00 +0100802 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800803 src = kmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400804 intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
Zhi Wang9556e112017-10-10 13:51:32 +0800805 I915_GTT_PAGE_SIZE);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800806 kunmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400807 i++;
808 }
809
810 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
811 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
812
813 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800814 shadow_ring_context = kmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400815
816#define COPY_REG(name) \
817 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
818 RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
819
820 COPY_REG(ctx_ctrl);
821 COPY_REG(ctx_timestamp);
822
823#undef COPY_REG
824
825 intel_gvt_hypervisor_write_gpa(vgpu,
826 workload->ring_context_gpa +
827 sizeof(*shadow_ring_context),
828 (void *)shadow_ring_context +
829 sizeof(*shadow_ring_context),
Zhi Wang9556e112017-10-10 13:51:32 +0800830 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
Zhi Wange4734052016-05-01 07:42:16 -0400831
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800832 kunmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400833}
834
Zhi Wange2c43c02017-09-13 01:58:35 +0800835static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
836{
837 struct intel_vgpu_submission *s = &vgpu->submission;
838 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
839 struct intel_engine_cs *engine;
840 struct intel_vgpu_workload *pos, *n;
841 unsigned int tmp;
842
 843	/* free the unsubmitted workloads in the queues. */
844 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
845 list_for_each_entry_safe(pos, n,
846 &s->workload_q_head[engine->id], list) {
847 list_del_init(&pos->list);
848 intel_vgpu_destroy_workload(pos);
849 }
850 clear_bit(engine->id, s->shadow_ctx_desc_updated);
851 }
852}
853
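/* Finish the current workload on a ring: wait for the shadow context to be
 * switched out, propagate the status back to the guest context, deliver
 * pending events and release per-workload resources.
 */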
Zhi Wange4734052016-05-01 07:42:16 -0400854static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
855{
856 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
Zhi Wang1406a142017-09-10 21:15:18 +0800857 struct intel_vgpu_workload *workload =
858 scheduler->current_workload[ring_id];
859 struct intel_vgpu *vgpu = workload->vgpu;
860 struct intel_vgpu_submission *s = &vgpu->submission;
Zhi Wangbe1da702016-05-03 18:26:57 -0400861 int event;
Zhi Wange4734052016-05-01 07:42:16 -0400862
863 mutex_lock(&gvt->lock);
864
Chuanxiao Dong8f1117a2017-03-06 13:05:24 +0800865	/* For a workload with a request, we need to wait for the context
 866	 * switch to make sure the request is completed.
 867	 * For a workload without a request, directly complete the workload.
868 */
869 if (workload->req) {
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800870 struct drm_i915_private *dev_priv =
871 workload->vgpu->gvt->dev_priv;
872 struct intel_engine_cs *engine =
873 dev_priv->engine[workload->ring_id];
Zhi Wange4734052016-05-01 07:42:16 -0400874 wait_event(workload->shadow_ctx_status_wq,
875 !atomic_read(&workload->shadow_ctx_active));
876
Chuanxiao Dong0cf5ec42017-06-23 13:01:11 +0800877		/* If this request caused a GPU hang, req->fence.error will
 878		 * be set to -EIO. Use -EIO as the workload status so
 879		 * that, when this request caused a GPU hang, we don't trigger
 880		 * a context switch interrupt to the guest.
881 */
882 if (likely(workload->status == -EINPROGRESS)) {
883 if (workload->req->fence.error == -EIO)
884 workload->status = -EIO;
885 else
886 workload->status = 0;
887 }
888
Chris Wilsone61e0f52018-02-21 09:56:36 +0000889 i915_request_put(fetch_and_zero(&workload->req));
Zhi Wangbe1da702016-05-03 18:26:57 -0400890
Chuanxiao Dong6184cc82017-08-01 17:47:25 +0800891 if (!workload->status && !(vgpu->resetting_eng &
892 ENGINE_MASK(ring_id))) {
Chuanxiao Dong8f1117a2017-03-06 13:05:24 +0800893 update_guest_context(workload);
894
895 for_each_set_bit(event, workload->pending_events,
896 INTEL_GVT_EVENT_MAX)
897 intel_vgpu_trigger_virtual_event(vgpu, event);
898 }
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800899 mutex_lock(&dev_priv->drm.struct_mutex);
900 /* unpin shadow ctx as the shadow_ctx update is done */
Zhi Wang1406a142017-09-10 21:15:18 +0800901 engine->context_unpin(engine, s->shadow_ctx);
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800902 mutex_unlock(&dev_priv->drm.struct_mutex);
Zhi Wange4734052016-05-01 07:42:16 -0400903 }
904
905 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
906 ring_id, workload, workload->status);
907
908 scheduler->current_workload[ring_id] = NULL;
909
Zhi Wange4734052016-05-01 07:42:16 -0400910 list_del_init(&workload->list);
Zhi Wangd8235b52017-09-12 22:06:39 +0800911
912 if (!workload->status) {
913 release_shadow_batch_buffer(workload);
914 release_shadow_wa_ctx(&workload->wa_ctx);
915 }
916
Zhi Wange2c43c02017-09-13 01:58:35 +0800917 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 918		/* if workload->status is not successful, it means the HW GPU
 919		 * hit a hang or something went wrong with i915/GVT,
 920		 * and GVT won't inject a context switch interrupt to the guest.
 921		 * So this error is effectively a vGPU hang to the guest.
 922		 * Accordingly, we should emulate a vGPU hang. If
 923		 * there are pending workloads which were already submitted
 924		 * from the guest, we should clean them up like the HW GPU does.
 925		 *
 926		 * if it is in the middle of an engine reset, the pending
 927		 * workloads won't be submitted to the HW GPU and will be
 928		 * cleaned up later during the reset process, so doing
 929		 * the workload cleanup here doesn't have any impact.
930 **/
931 clean_workloads(vgpu, ENGINE_MASK(ring_id));
932 }
933
Zhi Wange4734052016-05-01 07:42:16 -0400934 workload->complete(workload);
935
Zhi Wang1406a142017-09-10 21:15:18 +0800936 atomic_dec(&s->running_workload_num);
Zhi Wange4734052016-05-01 07:42:16 -0400937 wake_up(&scheduler->workload_complete_wq);
Ping Gaof100dae2017-05-24 09:14:11 +0800938
939 if (gvt->scheduler.need_reschedule)
940 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
941
Zhi Wange4734052016-05-01 07:42:16 -0400942 mutex_unlock(&gvt->lock);
943}
944
945struct workload_thread_param {
946 struct intel_gvt *gvt;
947 int ring_id;
948};
949
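/* Per-ring kernel thread: pick the next workload from the current vGPU's
 * queue, dispatch it to i915, wait for it to finish and then complete it.
 */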
950static int workload_thread(void *priv)
951{
952 struct workload_thread_param *p = (struct workload_thread_param *)priv;
953 struct intel_gvt *gvt = p->gvt;
954 int ring_id = p->ring_id;
955 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
956 struct intel_vgpu_workload *workload = NULL;
Tina Zhang695fbc02017-03-10 04:26:53 -0500957 struct intel_vgpu *vgpu = NULL;
Zhi Wange4734052016-05-01 07:42:16 -0400958 int ret;
Xu Hane3476c02017-03-29 10:13:59 +0800959 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
960 || IS_KABYLAKE(gvt->dev_priv);
Du, Changbine45d7b72016-10-27 11:10:31 +0800961 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Zhi Wange4734052016-05-01 07:42:16 -0400962
963 kfree(p);
964
965 gvt_dbg_core("workload thread for ring %d started\n", ring_id);
966
967 while (!kthread_should_stop()) {
Du, Changbine45d7b72016-10-27 11:10:31 +0800968 add_wait_queue(&scheduler->waitq[ring_id], &wait);
969 do {
970 workload = pick_next_workload(gvt, ring_id);
971 if (workload)
972 break;
973 wait_woken(&wait, TASK_INTERRUPTIBLE,
974 MAX_SCHEDULE_TIMEOUT);
975 } while (!kthread_should_stop());
976 remove_wait_queue(&scheduler->waitq[ring_id], &wait);
Zhi Wange4734052016-05-01 07:42:16 -0400977
Du, Changbine45d7b72016-10-27 11:10:31 +0800978 if (!workload)
Zhi Wange4734052016-05-01 07:42:16 -0400979 break;
980
981 gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
982 workload->ring_id, workload,
983 workload->vgpu->id);
984
985 intel_runtime_pm_get(gvt->dev_priv);
986
Zhi Wange4734052016-05-01 07:42:16 -0400987 gvt_dbg_sched("ring id %d will dispatch workload %p\n",
988 workload->ring_id, workload);
989
990 if (need_force_wake)
991 intel_uncore_forcewake_get(gvt->dev_priv,
992 FORCEWAKE_ALL);
993
Pei Zhang90d27a12016-11-14 18:02:57 +0800994 mutex_lock(&gvt->lock);
Zhi Wange4734052016-05-01 07:42:16 -0400995 ret = dispatch_workload(workload);
Pei Zhang90d27a12016-11-14 18:02:57 +0800996 mutex_unlock(&gvt->lock);
Chris Wilson66bbc3b2016-10-19 11:11:44 +0100997
Zhi Wange4734052016-05-01 07:42:16 -0400998 if (ret) {
Tina Zhang695fbc02017-03-10 04:26:53 -0500999 vgpu = workload->vgpu;
1000 gvt_vgpu_err("fail to dispatch workload, skip\n");
Zhi Wange4734052016-05-01 07:42:16 -04001001 goto complete;
1002 }
1003
1004 gvt_dbg_sched("ring id %d wait workload %p\n",
1005 workload->ring_id, workload);
Chris Wilsone61e0f52018-02-21 09:56:36 +00001006 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
Zhi Wange4734052016-05-01 07:42:16 -04001007
1008complete:
Changbin Du3ce32742017-02-09 10:13:16 +08001009 gvt_dbg_sched("will complete workload %p, status: %d\n",
Zhi Wange4734052016-05-01 07:42:16 -04001010 workload, workload->status);
1011
Changbin Du2e51ef32017-01-05 13:28:05 +08001012 complete_current_workload(gvt, ring_id);
1013
Zhi Wange4734052016-05-01 07:42:16 -04001014 if (need_force_wake)
1015 intel_uncore_forcewake_put(gvt->dev_priv,
1016 FORCEWAKE_ALL);
1017
Zhi Wange4734052016-05-01 07:42:16 -04001018 intel_runtime_pm_put(gvt->dev_priv);
Zhi Wang6d763032017-09-12 22:33:12 +08001019 if (ret && (vgpu_is_vm_unhealthy(ret)))
fred gaoe011c6c2017-09-19 15:11:28 +08001020 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
Zhi Wange4734052016-05-01 07:42:16 -04001021 }
1022 return 0;
1023}
1024
1025void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
1026{
Zhi Wang1406a142017-09-10 21:15:18 +08001027 struct intel_vgpu_submission *s = &vgpu->submission;
Zhi Wange4734052016-05-01 07:42:16 -04001028 struct intel_gvt *gvt = vgpu->gvt;
1029 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1030
Zhi Wang1406a142017-09-10 21:15:18 +08001031 if (atomic_read(&s->running_workload_num)) {
Zhi Wange4734052016-05-01 07:42:16 -04001032 gvt_dbg_sched("wait vgpu idle\n");
1033
1034 wait_event(scheduler->workload_complete_wq,
Zhi Wang1406a142017-09-10 21:15:18 +08001035 !atomic_read(&s->running_workload_num));
Zhi Wange4734052016-05-01 07:42:16 -04001036 }
1037}
1038
1039void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
1040{
1041 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
Changbin Du3fc03062017-03-13 10:47:11 +08001042 struct intel_engine_cs *engine;
1043 enum intel_engine_id i;
Zhi Wange4734052016-05-01 07:42:16 -04001044
1045 gvt_dbg_core("clean workload scheduler\n");
1046
Changbin Du3fc03062017-03-13 10:47:11 +08001047 for_each_engine(engine, gvt->dev_priv, i) {
1048 atomic_notifier_chain_unregister(
1049 &engine->context_status_notifier,
1050 &gvt->shadow_ctx_notifier_block[i]);
1051 kthread_stop(scheduler->thread[i]);
Zhi Wange4734052016-05-01 07:42:16 -04001052 }
1053}
1054
1055int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
1056{
1057 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1058 struct workload_thread_param *param = NULL;
Changbin Du3fc03062017-03-13 10:47:11 +08001059 struct intel_engine_cs *engine;
1060 enum intel_engine_id i;
Zhi Wange4734052016-05-01 07:42:16 -04001061 int ret;
Zhi Wange4734052016-05-01 07:42:16 -04001062
1063 gvt_dbg_core("init workload scheduler\n");
1064
1065 init_waitqueue_head(&scheduler->workload_complete_wq);
1066
Changbin Du3fc03062017-03-13 10:47:11 +08001067 for_each_engine(engine, gvt->dev_priv, i) {
Zhi Wange4734052016-05-01 07:42:16 -04001068 init_waitqueue_head(&scheduler->waitq[i]);
1069
1070 param = kzalloc(sizeof(*param), GFP_KERNEL);
1071 if (!param) {
1072 ret = -ENOMEM;
1073 goto err;
1074 }
1075
1076 param->gvt = gvt;
1077 param->ring_id = i;
1078
1079 scheduler->thread[i] = kthread_run(workload_thread, param,
1080 "gvt workload %d", i);
1081 if (IS_ERR(scheduler->thread[i])) {
1082 gvt_err("fail to create workload thread\n");
1083 ret = PTR_ERR(scheduler->thread[i]);
1084 goto err;
1085 }
Changbin Du3fc03062017-03-13 10:47:11 +08001086
1087 gvt->shadow_ctx_notifier_block[i].notifier_call =
1088 shadow_context_status_change;
1089 atomic_notifier_chain_register(&engine->context_status_notifier,
1090 &gvt->shadow_ctx_notifier_block[i]);
Zhi Wange4734052016-05-01 07:42:16 -04001091 }
1092 return 0;
1093err:
1094 intel_gvt_clean_workload_scheduler(gvt);
1095 kfree(param);
1096 param = NULL;
1097 return ret;
1098}
1099
Zhi Wang874b6a92017-09-10 20:08:18 +08001100/**
1101 * intel_vgpu_clean_submission - free submission-related resource for vGPU
1102 * @vgpu: a vGPU
1103 *
1104 * This function is called when a vGPU is being destroyed.
1105 *
1106 */
1107void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
Zhi Wange4734052016-05-01 07:42:16 -04001108{
Zhi Wang1406a142017-09-10 21:15:18 +08001109 struct intel_vgpu_submission *s = &vgpu->submission;
1110
Weinan Li7569a062018-01-26 15:09:07 +08001111 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
Zhi Wang1406a142017-09-10 21:15:18 +08001112 i915_gem_context_put(s->shadow_ctx);
1113 kmem_cache_destroy(s->workloads);
Zhi Wange4734052016-05-01 07:42:16 -04001114}
1115
Zhi Wang06bb3722017-09-13 01:41:35 +08001116
1117/**
1118 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
1119 * @vgpu: a vGPU
1120 * @engine_mask: engines expected to be reset
1121 *
 1122 * This function is called when a vGPU is being reset.
1123 *
1124 */
1125void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1126 unsigned long engine_mask)
1127{
1128 struct intel_vgpu_submission *s = &vgpu->submission;
1129
1130 if (!s->active)
1131 return;
1132
Zhi Wange2c43c02017-09-13 01:58:35 +08001133 clean_workloads(vgpu, engine_mask);
Zhi Wang06bb3722017-09-13 01:41:35 +08001134 s->ops->reset(vgpu, engine_mask);
1135}
1136
Zhi Wang874b6a92017-09-10 20:08:18 +08001137/**
1138 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1139 * @vgpu: a vGPU
1140 *
1141 * This function is called when a vGPU is being created.
1142 *
1143 * Returns:
1144 * Zero on success, negative error code if failed.
1145 *
1146 */
1147int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
Zhi Wange4734052016-05-01 07:42:16 -04001148{
Zhi Wang1406a142017-09-10 21:15:18 +08001149 struct intel_vgpu_submission *s = &vgpu->submission;
Zhi Wang9a9829e2017-09-10 20:28:09 +08001150 enum intel_engine_id i;
1151 struct intel_engine_cs *engine;
1152 int ret;
Zhi Wange4734052016-05-01 07:42:16 -04001153
Zhi Wang1406a142017-09-10 21:15:18 +08001154 s->shadow_ctx = i915_gem_context_create_gvt(
Zhi Wange4734052016-05-01 07:42:16 -04001155 &vgpu->gvt->dev_priv->drm);
Zhi Wang1406a142017-09-10 21:15:18 +08001156 if (IS_ERR(s->shadow_ctx))
1157 return PTR_ERR(s->shadow_ctx);
Zhi Wange4734052016-05-01 07:42:16 -04001158
Zhenyu Wang16036602017-12-04 10:42:58 +08001159 if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
Chris Wilsonb7268c52018-04-18 19:40:52 +01001160 s->shadow_ctx->sched.priority = INT_MAX;
Zhenyu Wang16036602017-12-04 10:42:58 +08001161
Zhi Wang1406a142017-09-10 21:15:18 +08001162 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
Kechen Lu9dfb8e52017-08-10 07:41:36 +08001163
Zhenyu Wang850555d2018-02-14 11:35:01 +08001164 s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1165 sizeof(struct intel_vgpu_workload), 0,
1166 SLAB_HWCACHE_ALIGN,
1167 offsetof(struct intel_vgpu_workload, rb_tail),
1168 sizeof_field(struct intel_vgpu_workload, rb_tail),
1169 NULL);
Zhi Wang9a9829e2017-09-10 20:28:09 +08001170
Zhi Wang1406a142017-09-10 21:15:18 +08001171 if (!s->workloads) {
Zhi Wang9a9829e2017-09-10 20:28:09 +08001172 ret = -ENOMEM;
1173 goto out_shadow_ctx;
1174 }
1175
1176 for_each_engine(engine, vgpu->gvt->dev_priv, i)
Zhi Wang1406a142017-09-10 21:15:18 +08001177 INIT_LIST_HEAD(&s->workload_q_head[i]);
Zhi Wang9a9829e2017-09-10 20:28:09 +08001178
Zhi Wang1406a142017-09-10 21:15:18 +08001179 atomic_set(&s->running_workload_num, 0);
Zhi Wang91d5d852017-09-10 21:33:20 +08001180 bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
Zhi Wang9a9829e2017-09-10 20:28:09 +08001181
Zhi Wange4734052016-05-01 07:42:16 -04001182 return 0;
Zhi Wang9a9829e2017-09-10 20:28:09 +08001183
1184out_shadow_ctx:
Zhi Wang1406a142017-09-10 21:15:18 +08001185 i915_gem_context_put(s->shadow_ctx);
Zhi Wang9a9829e2017-09-10 20:28:09 +08001186 return ret;
Zhi Wange4734052016-05-01 07:42:16 -04001187}
Zhi Wang21527a82017-09-12 21:42:09 +08001188
1189/**
Zhi Wangad1d3632017-09-13 00:31:29 +08001190 * intel_vgpu_select_submission_ops - select virtual submission interface
1191 * @vgpu: a vGPU
1192 * @interface: expected vGPU virtual submission interface
1193 *
1194 * This function is called when guest configures submission interface.
1195 *
1196 * Returns:
1197 * Zero on success, negative error code if failed.
1198 *
1199 */
1200int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
Weinan Li7569a062018-01-26 15:09:07 +08001201 unsigned long engine_mask,
Zhi Wangad1d3632017-09-13 00:31:29 +08001202 unsigned int interface)
1203{
1204 struct intel_vgpu_submission *s = &vgpu->submission;
1205 const struct intel_vgpu_submission_ops *ops[] = {
1206 [INTEL_VGPU_EXECLIST_SUBMISSION] =
1207 &intel_vgpu_execlist_submission_ops,
1208 };
1209 int ret;
1210
1211 if (WARN_ON(interface >= ARRAY_SIZE(ops)))
1212 return -EINVAL;
1213
Weinan Li9212b132018-01-26 15:09:08 +08001214 if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
1215 return -EINVAL;
1216
1217 if (s->active)
Weinan Li7569a062018-01-26 15:09:07 +08001218 s->ops->clean(vgpu, engine_mask);
Zhi Wangad1d3632017-09-13 00:31:29 +08001219
1220 if (interface == 0) {
1221 s->ops = NULL;
1222 s->virtual_submission_interface = 0;
Weinan Li9212b132018-01-26 15:09:08 +08001223 s->active = false;
1224 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
Zhi Wangad1d3632017-09-13 00:31:29 +08001225 return 0;
1226 }
1227
Weinan Li7569a062018-01-26 15:09:07 +08001228 ret = ops[interface]->init(vgpu, engine_mask);
Zhi Wangad1d3632017-09-13 00:31:29 +08001229 if (ret)
1230 return ret;
1231
1232 s->ops = ops[interface];
1233 s->virtual_submission_interface = interface;
1234 s->active = true;
1235
1236 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1237 vgpu->id, s->ops->name);
1238
1239 return 0;
1240}
1241
1242/**
Zhi Wang21527a82017-09-12 21:42:09 +08001243 * intel_vgpu_destroy_workload - destroy a vGPU workload
1244 * @vgpu: a vGPU
1245 *
1246 * This function is called when destroy a vGPU workload.
1247 *
1248 */
1249void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1250{
1251 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1252
1253 if (workload->shadow_mm)
Changbin Du1bc25852018-01-30 19:19:41 +08001254 intel_vgpu_mm_put(workload->shadow_mm);
Zhi Wang21527a82017-09-12 21:42:09 +08001255
1256 kmem_cache_free(s->workloads, workload);
1257}
1258
Zhi Wang6d763032017-09-12 22:33:12 +08001259static struct intel_vgpu_workload *
1260alloc_workload(struct intel_vgpu *vgpu)
Zhi Wang21527a82017-09-12 21:42:09 +08001261{
1262 struct intel_vgpu_submission *s = &vgpu->submission;
1263 struct intel_vgpu_workload *workload;
1264
1265 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1266 if (!workload)
1267 return ERR_PTR(-ENOMEM);
1268
1269 INIT_LIST_HEAD(&workload->list);
1270 INIT_LIST_HEAD(&workload->shadow_bb);
1271
1272 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1273 atomic_set(&workload->shadow_ctx_active, 0);
1274
1275 workload->status = -EINPROGRESS;
1276 workload->shadowed = false;
1277 workload->vgpu = vgpu;
1278
1279 return workload;
1280}
Zhi Wang6d763032017-09-12 22:33:12 +08001281
1282#define RING_CTX_OFF(x) \
1283 offsetof(struct execlist_ring_context, x)
1284
1285static void read_guest_pdps(struct intel_vgpu *vgpu,
1286 u64 ring_context_gpa, u32 pdp[8])
1287{
1288 u64 gpa;
1289 int i;
1290
1291 gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
1292
1293 for (i = 0; i < 8; i++)
1294 intel_gvt_hypervisor_read_gpa(vgpu,
1295 gpa + i * 8, &pdp[7 - i], 4);
1296}
1297
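/* Read the guest PDPs from the guest ring context and look up or create the
 * matching shadow PPGTT mm for this workload.
 */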
1298static int prepare_mm(struct intel_vgpu_workload *workload)
1299{
1300 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1301 struct intel_vgpu_mm *mm;
1302 struct intel_vgpu *vgpu = workload->vgpu;
Changbin Duede9d0c2018-01-30 19:19:40 +08001303 intel_gvt_gtt_type_t root_entry_type;
1304 u64 pdps[GVT_RING_CTX_NR_PDPS];
Zhi Wang6d763032017-09-12 22:33:12 +08001305
Changbin Duede9d0c2018-01-30 19:19:40 +08001306 switch (desc->addressing_mode) {
1307 case 1: /* legacy 32-bit */
1308 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1309 break;
1310 case 3: /* legacy 64-bit */
1311 root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1312 break;
1313 default:
Zhi Wang6d763032017-09-12 22:33:12 +08001314 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1315 return -EINVAL;
1316 }
1317
Changbin Duede9d0c2018-01-30 19:19:40 +08001318 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
Zhi Wang6d763032017-09-12 22:33:12 +08001319
Changbin Due6e9c462018-01-30 19:19:46 +08001320 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1321 if (IS_ERR(mm))
1322 return PTR_ERR(mm);
Zhi Wang6d763032017-09-12 22:33:12 +08001323
Zhi Wang6d763032017-09-12 22:33:12 +08001324 workload->shadow_mm = mm;
1325 return 0;
1326}
1327
1328#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1329 ((a)->lrca == (b)->lrca))
1330
1331#define get_last_workload(q) \
1332 (list_empty(q) ? NULL : container_of(q->prev, \
1333 struct intel_vgpu_workload, list))
1334/**
1335 * intel_vgpu_create_workload - create a vGPU workload
1336 * @vgpu: a vGPU
1337 * @desc: a guest context descriptor
1338 *
1339 * This function is called when creating a vGPU workload.
1340 *
1341 * Returns:
1342 * struct intel_vgpu_workload * on success, negative error code in
1343 * pointer if failed.
1344 *
1345 */
1346struct intel_vgpu_workload *
1347intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1348 struct execlist_ctx_descriptor_format *desc)
1349{
1350 struct intel_vgpu_submission *s = &vgpu->submission;
1351 struct list_head *q = workload_q_head(vgpu, ring_id);
1352 struct intel_vgpu_workload *last_workload = get_last_workload(q);
1353 struct intel_vgpu_workload *workload = NULL;
1354 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1355 u64 ring_context_gpa;
1356 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1357 int ret;
1358
1359 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
Zhi Wang9556e112017-10-10 13:51:32 +08001360 (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
Zhi Wang6d763032017-09-12 22:33:12 +08001361 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1362 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1363 return ERR_PTR(-EINVAL);
1364 }
1365
1366 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1367 RING_CTX_OFF(ring_header.val), &head, 4);
1368
1369 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1370 RING_CTX_OFF(ring_tail.val), &tail, 4);
1371
1372 head &= RB_HEAD_OFF_MASK;
1373 tail &= RB_TAIL_OFF_MASK;
1374
1375 if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
1376 gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
1377 gvt_dbg_el("ctx head %x real head %lx\n", head,
1378 last_workload->rb_tail);
1379 /*
1380 * cannot use guest context head pointer here,
1381 * as it might not be updated at this time
1382 */
1383 head = last_workload->rb_tail;
1384 }
1385
1386 gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
1387
1388 /* record some ring buffer register values for scan and shadow */
1389 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1390 RING_CTX_OFF(rb_start.val), &start, 4);
1391 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1392 RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
1393 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1394 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1395
1396 workload = alloc_workload(vgpu);
1397 if (IS_ERR(workload))
1398 return workload;
1399
1400 workload->ring_id = ring_id;
1401 workload->ctx_desc = *desc;
1402 workload->ring_context_gpa = ring_context_gpa;
1403 workload->rb_head = head;
1404 workload->rb_tail = tail;
1405 workload->rb_start = start;
1406 workload->rb_ctl = ctl;
1407
1408 if (ring_id == RCS) {
1409 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1410 RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
1411 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1412 RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
1413
1414 workload->wa_ctx.indirect_ctx.guest_gma =
1415 indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1416 workload->wa_ctx.indirect_ctx.size =
1417 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1418 CACHELINE_BYTES;
1419 workload->wa_ctx.per_ctx.guest_gma =
1420 per_ctx & PER_CTX_ADDR_MASK;
1421 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1422 }
1423
1424 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
1425 workload, ring_id, head, tail, start, ctl);
1426
1427 ret = prepare_mm(workload);
1428 if (ret) {
1429 kmem_cache_free(s->workloads, workload);
1430 return ERR_PTR(ret);
1431 }
1432
1433 /* Only scan and shadow the first workload in the queue
1434 * as there is only one pre-allocated buf-obj for shadow.
1435 */
1436 if (list_empty(workload_q_head(vgpu, ring_id))) {
1437 intel_runtime_pm_get(dev_priv);
1438 mutex_lock(&dev_priv->drm.struct_mutex);
1439 ret = intel_gvt_scan_and_shadow_workload(workload);
1440 mutex_unlock(&dev_priv->drm.struct_mutex);
1441 intel_runtime_pm_put(dev_priv);
1442 }
1443
1444 if (ret && (vgpu_is_vm_unhealthy(ret))) {
1445 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1446 intel_vgpu_destroy_workload(workload);
1447 return ERR_PTR(ret);
1448 }
1449
1450 return workload;
1451}
Changbin Du59a716c2017-11-29 15:40:06 +08001452
1453/**
 1454 * intel_vgpu_queue_workload - Queue a vGPU workload
1455 * @workload: the workload to queue in
1456 */
1457void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1458{
1459 list_add_tail(&workload->list,
1460 workload_q_head(workload->vgpu, workload->ring_id));
Changbin Duc1304562017-11-29 15:40:07 +08001461 intel_gvt_kick_schedule(workload->vgpu->gvt);
Changbin Du59a716c2017-11-29 15:40:06 +08001462 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
1463}