/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

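/*
 * Copy the shadow PPGTT PDP root pointers of the workload's shadow_mm into
 * the LRC state page of the shadow context, so that the shadow context uses
 * the shadowed page tables rather than the guest ones.
 */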
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	if (WARN_ON(!workload->shadow_mm))
		return;

	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
		return;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);
	set_context_pdp_root_pointer(shadow_ring_context,
			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
	kunmap(page);
}

/*
 * When populating the shadow ctx from the guest, we should not override
 * OA-related registers, so that they will not be overwritten by guest OA
 * configs. This makes it possible to capture OA data from the host for
 * both the host and guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		u32 *reg_state, bool save)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	int i = 0;
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};

	if (!workload || !reg_state || workload->ring_id != RCS)
		return;

	if (save) {
		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;

			workload->flex_mmio[i] = reg_state[state_offset + 1];
		}
	} else {
		reg_state[ctx_oactxctrl] =
			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;
			u32 mmio = flex_mmio[i];

			reg_state[state_offset] = mmio;
			reg_state[state_offset + 1] = workload->flex_mmio[i];
		}
	}
}

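/*
 * Copy the guest's logical ring context from guest memory into the shadow
 * context object, saving and restoring the OA registers around the copy so
 * they are not overwritten by the guest values.
 */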
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
					      + RING_CTX_OFF(name.val),\
					      &shadow_ring_context->name.val, 4);\
		shadow_ring_context->name.val |= 0xffff << 16;\
	}

	COPY_REG_MASKED(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG
#undef COPY_REG_MASKED

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
	kunmap(page);
	return 0;
}

static inline bool is_gvt_request(struct i915_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}

static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
	i915_reg_t reg;

	reg = RING_INSTDONE(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD_UDW(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

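/*
 * Notifier called by i915 on context status changes (schedule in/out and
 * preemption). It switches engine MMIO state between the host and the vGPU
 * owner and tracks whether the shadow context is active on the hardware.
 */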
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct i915_request *req = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(req)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, ring_id);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ctx->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}

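/*
 * Copy the scanned guest ring buffer contents into ring space reserved on
 * the shadow context's ring via intel_ring_begin().
 */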
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	void *shadow_ring_buffer_va;
	u32 *cs;
	struct i915_request *req = workload->req;

	if (IS_KABYLAKE(req->i915) &&
	    is_inhibit_context(req->ctx, req->engine->id))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct intel_ring *ring;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadowed)
		return 0;

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(shadow_ctx,
					dev_priv->engine[ring_id]);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_scan;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_scan;
	}

	/* Pin the shadow context by GVT even though the shadow context will
	 * be pinned when i915 allocates the request. That is because GVT will
	 * update the guest context from the shadow context when the workload
	 * is completed, and at that moment i915 may already have unpinned the
	 * shadow context, making the shadow_ctx pages invalid. So GVT needs
	 * to pin it itself. After updating the guest context, GVT can unpin
	 * the shadow_ctx safely.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		goto err_shadow;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto err_unpin;
	workload->shadowed = true;
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
	return ret;
}


static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct i915_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ret;

	rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto err_unpin;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_request_get(rq);
	ret = copy_workload_to_ring_buffer(workload);
	if (ret)
		goto err_unpin;
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
	release_shadow_wa_ctx(&workload->wa_ctx);
	return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

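/*
 * Pin each shadow batch buffer of the workload into the GGTT and patch the
 * batch buffer start command in the shadow ring buffer so that it points at
 * the shadowed copy instead of the guest batch buffer.
 */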
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_gvt *gvt = workload->vgpu->gvt;
	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
		if (IS_ERR(bb->vma)) {
			ret = PTR_ERR(bb->vma);
			goto err;
		}

		/* For a privileged batch buffer that is not wa_ctx, the
		 * bb_start_cmd_va is only updated into ring_scan_buffer, not
		 * the real ring address allocated later in
		 * copy_workload_to_ring_buffer. Please note that
		 * shadow_ring_buffer_va points to the real ring buffer va
		 * after copy_workload_to_ring_buffer.
		 */

		if (bb->bb_offset)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		/* relocate shadow batch buffer */
		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
		if (gmadr_bytes == 8)
			bb->bb_start_cmd_va[2] = 0;

		/* No one is going to touch shadow bb from now on. */
		if (bb->clflush & CLFLUSH_AFTER) {
			drm_clflush_virt_range(bb->va, bb->obj->base.size);
			bb->clflush &= ~CLFLUSH_AFTER;
		}

		ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
		if (ret)
			goto err;

		i915_gem_obj_finish_shmem_access(bb->obj);
		bb->accessing = false;

		i915_vma_move_to_active(bb->vma, workload->req, 0);
	}
	return 0;
err:
	release_shadow_batch_buffer(workload);
	return ret;
}


static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	int ring_id = workload->ring_id;
	struct intel_vgpu_submission *s = &workload->vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap_atomic(page);

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

	kunmap_atomic(shadow_ring_context);
	return 0;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_shadow_bb *bb, *pos;

	if (list_empty(&workload->shadow_bb))
		return;

	bb = list_first_entry(&workload->shadow_bb,
			struct intel_vgpu_shadow_bb, list);

	mutex_lock(&dev_priv->drm.struct_mutex);

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
				i915_gem_obj_finish_shmem_access(bb->obj);

			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

			if (bb->vma && !IS_ERR(bb->vma)) {
				i915_vma_unpin(bb->vma);
				i915_vma_close(bb->vma);
			}
			__i915_gem_object_release_unless_active(bb->obj);
		}
		list_del(&bb->list);
		kfree(bb);
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static int prepare_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret = 0;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		return ret;
	}

	update_shadow_pdps(workload);

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = intel_gvt_generate_request(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err_shadow_wa_ctx;
	}

	return 0;
err_shadow_wa_ctx:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_unpin_mm(workload->shadow_mm);
	return ret;
}

fred gao0a53bc02017-08-18 15:41:06 +0800648static int dispatch_workload(struct intel_vgpu_workload *workload)
649{
Zhi Wang1406a142017-09-10 21:15:18 +0800650 struct intel_vgpu *vgpu = workload->vgpu;
651 struct intel_vgpu_submission *s = &vgpu->submission;
652 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
653 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
fred gao0a53bc02017-08-18 15:41:06 +0800654 int ring_id = workload->ring_id;
fred gao0a53bc02017-08-18 15:41:06 +0800655 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
656 int ret = 0;
657
658 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
659 ring_id, workload);
660
661 mutex_lock(&dev_priv->drm.struct_mutex);
662
663 ret = intel_gvt_scan_and_shadow_workload(workload);
664 if (ret)
665 goto out;
666
Zhi Wang497aa3f2017-09-12 21:51:10 +0800667 ret = prepare_workload(workload);
668 if (ret) {
669 engine->context_unpin(engine, shadow_ctx);
670 goto out;
fred gao0a53bc02017-08-18 15:41:06 +0800671 }
672
Pei Zhang90d27a12016-11-14 18:02:57 +0800673out:
674 if (ret)
675 workload->status = ret;
Chris Wilson0eb742d2016-10-20 17:29:36 +0800676
Ping Gao89ea20b2017-06-29 12:22:42 +0800677 if (!IS_ERR_OR_NULL(workload->req)) {
678 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
679 ring_id, workload->req);
Chris Wilsone61e0f52018-02-21 09:56:36 +0000680 i915_request_add(workload->req);
Ping Gao89ea20b2017-06-29 12:22:42 +0800681 workload->dispatched = true;
682 }
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800683
Pei Zhang90d27a12016-11-14 18:02:57 +0800684 mutex_unlock(&dev_priv->drm.struct_mutex);
Zhi Wange4734052016-05-01 07:42:16 -0400685 return ret;
686}
687
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * We still have a current workload; maybe the workload dispatcher
	 * failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * Pick a workload as the current workload.
	 * Once the current workload is set, the schedule policy routines
	 * will wait until the current workload is finished when trying to
	 * schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

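/*
 * Write the shadow context contents back into the guest's logical ring
 * context after the workload completes, so the guest sees the updated
 * hardware state.
 */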
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&s->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			intel_vgpu_destroy_workload(pos);
		}
		clear_bit(engine->id, s->shadow_ctx_desc_updated);
	}
}

static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	int event;

	mutex_lock(&gvt->lock);

	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, directly complete the workload.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO to set the workload status so
		 * that, when this request caused a GPU hang, we don't
		 * trigger a context switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		i915_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, s->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);

	if (!workload->status) {
		release_shadow_batch_buffer(workload);
		release_shadow_wa_ctx(&workload->wa_ctx);
	}

	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* If workload->status is not successful, it means the HW GPU
		 * hit a hang or something went wrong with i915/GVT, and GVT
		 * won't inject a context switch interrupt to the guest. So
		 * this error is actually a vGPU hang from the guest's point
		 * of view, and we should emulate a vGPU hang accordingly. If
		 * there are pending workloads which were already submitted
		 * from the guest, we should clean them up like the HW GPU
		 * does.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up during the resetting process later, so doing
		 * the workload clean up here doesn't have any impact.
		 **/
		clean_workloads(vgpu, ENGINE_MASK(ring_id));
	}

	workload->complete(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

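/*
 * Per-ring kernel thread: wait for a workload to be queued, dispatch it to
 * i915, wait for it to finish, then complete it and report any error back
 * to the vGPU.
 */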
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
		if (ret && (vgpu_is_vm_unhealthy(ret)))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
				&engine->context_status_notifier,
				&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
	i915_gem_context_put(s->shadow_ctx);
	kmem_cache_destroy(s->workloads);
}


/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
		unsigned long engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	if (!s->active)
		return;

	clean_workloads(vgpu, engine_mask);
	s->ops->reset(vgpu, engine_mask);
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	int ret;

	s->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(s->shadow_ctx))
		return PTR_ERR(s->shadow_ctx);

	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
		s->shadow_ctx->sched.priority = INT_MAX;

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
						  sizeof(struct intel_vgpu_workload), 0,
						  SLAB_HWCACHE_ALIGN,
						  offsetof(struct intel_vgpu_workload, rb_tail),
						  sizeof_field(struct intel_vgpu_workload, rb_tail),
						  NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		INIT_LIST_HEAD(&s->workload_q_head[i]);

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	return 0;

out_shadow_ctx:
	i915_gem_context_put(s->shadow_ctx);
	return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: mask of engines the selection applies to
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when a guest configures the submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     unsigned long engine_mask,
				     unsigned int interface)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	const struct intel_vgpu_submission_ops *ops[] = {
		[INTEL_VGPU_EXECLIST_SUBMISSION] =
			&intel_vgpu_execlist_submission_ops,
	};
	int ret;

	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
		return -EINVAL;

	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
		return -EINVAL;

	if (s->active)
		s->ops->clean(vgpu, engine_mask);

	if (interface == 0) {
		s->ops = NULL;
		s->virtual_submission_interface = 0;
		s->active = false;
		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
		return 0;
	}

	ret = ops[interface]->init(vgpu, engine_mask);
	if (ret)
		return ret;

	s->ops = ops[interface];
	s->virtual_submission_interface = interface;
	s->active = true;

	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
			vgpu->id, s->ops->name);

	return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: the workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->shadowed = false;
	workload->vgpu = vgpu;

	return workload;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

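/*
 * Translate the guest context's addressing mode and PDP root pointers into
 * a shadow PPGTT mm for the workload.
 */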
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	intel_gvt_gtt_type_t root_entry_type;
	u64 pdps[GVT_RING_CTX_NR_PDPS];

	switch (desc->addressing_mode) {
	case 1: /* legacy 32-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		break;
	case 3: /* legacy 64-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
		break;
	default:
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	workload->shadow_mm = mm;
	return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring index of the workload
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	workload = alloc_workload(vgpu);
	if (IS_ERR(workload))
		return workload;

	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;

	if (ring_id == RCS) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
	}

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(workload_q_head(vgpu, ring_id))) {
		intel_runtime_pm_get(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		ret = intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_put(dev_priv);
	}

	if (ret && (vgpu_is_vm_unhealthy(ret))) {
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}

/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
	list_add_tail(&workload->list,
		workload_q_head(workload->vgpu, workload->ring_id));
	intel_gvt_kick_schedule(workload->vgpu->gvt);
	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}