/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

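/*
 * Write the shadow PPGTT root pointers into the ring context. The context
 * lays the PDP MMIO pairs out from pdp3_UDW downwards, while the pdp[]
 * argument is ordered from PDP0 upwards, hence the reversed indexing below.
 */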
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

/*
 * When populating the shadow context from the guest, we should not override
 * OA related registers, so that they will not be overwritten by guest OA
 * configs. This makes it possible to capture OA data from the host for both
 * the host and guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		u32 *reg_state, bool save)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	int i = 0;
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};

	if (!workload || !reg_state || workload->ring_id != RCS)
		return;

	if (save) {
		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;

			workload->flex_mmio[i] = reg_state[state_offset + 1];
		}
	} else {
		reg_state[ctx_oactxctrl] =
			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;
			u32 mmio = flex_mmio[i];

			reg_state[state_offset] = mmio;
			reg_state[state_offset + 1] = workload->flex_mmio[i];
		}
	}
}

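/*
 * Copy the guest's logical ring context pages into the shadow context
 * object through the hypervisor read interface, then fix up the ring
 * context page: the PDP root pointers are redirected to the shadow page
 * table and the host OA register values are preserved via sr_oa_regs().
 */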
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
			workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
	kunmap(page);
	return 0;
}

static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}

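/*
 * Snapshot a few ring registers (INSTDONE, ACTHD) into the vGPU's virtual
 * register space so the guest observes consistent values after its context
 * is scheduled out or preempted.
 */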
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
	i915_reg_t reg;

	reg = RING_INSTDONE(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD_UDW(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

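/*
 * Context status notifier callback from i915. For non-GVT requests, switch
 * the engine MMIO state back to the host when needed; for GVT requests,
 * switch engine ownership to the workload's vGPU on schedule-in and save
 * the ring hardware state on schedule-out or preemption.
 */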
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(req)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, ring_id);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ctx->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}

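/*
 * Copy the scanned and shadowed guest ring buffer contents into ring space
 * allocated from the i915 request, and record where it now lives in
 * workload->shadow_ring_buffer_va.
 */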
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	void *shadow_ring_buffer_va;
	u32 *cs;

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct intel_ring *ring;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadowed)
		return 0;

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(shadow_ctx,
					dev_priv->engine[ring_id]);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_scan;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_scan;
	}

	/* Pin the shadow context by GVT even though the shadow context will
	 * be pinned when i915 allocates a request. That is because GVT will
	 * update the guest context from the shadow context when the workload
	 * is completed, and at that moment i915 may already have unpinned the
	 * shadow context, making the shadow_ctx pages invalid. So GVT needs
	 * to pin it by itself. After the guest context is updated, GVT can
	 * unpin the shadow_ctx safely.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		goto err_shadow;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto err_unpin;
	workload->shadowed = true;
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
	return ret;
}

static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ret;

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto err_unpin;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);
	ret = copy_workload_to_ring_buffer(workload);
	if (ret)
		goto err_unpin;
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
	release_shadow_wa_ctx(&workload->wa_ctx);
	return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

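/*
 * Pin every shadow batch buffer object into the GGTT, patch the shadowed
 * batch buffer start command to point at the shadow copy, flush pending
 * CPU writes and mark the VMA active against the workload's request.
 */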
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_gvt *gvt = workload->vgpu->gvt;
	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
		if (IS_ERR(bb->vma)) {
			ret = PTR_ERR(bb->vma);
			goto err;
		}

		/* relocate shadow batch buffer */
		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
		if (gmadr_bytes == 8)
			bb->bb_start_cmd_va[2] = 0;

		/* No one is going to touch shadow bb from now on. */
		if (bb->clflush & CLFLUSH_AFTER) {
			drm_clflush_virt_range(bb->va, bb->obj->base.size);
			bb->clflush &= ~CLFLUSH_AFTER;
		}

		ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
		if (ret)
			goto err;

		i915_gem_obj_finish_shmem_access(bb->obj);
		bb->accessing = false;

		i915_vma_move_to_active(bb->vma, workload->req, 0);
	}
	return 0;
err:
	release_shadow_batch_buffer(workload);
	return ret;
}

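/*
 * Point the per-context and indirect-context (workaround batch buffer)
 * pointers in the shadow ring context at the shadowed copies pinned in
 * the GGTT.
 */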
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	int ring_id = workload->ring_id;
	struct intel_vgpu_submission *s = &workload->vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap_atomic(page);

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;

	kunmap_atomic(shadow_ring_context);
	return 0;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_shadow_bb *bb, *pos;

	if (list_empty(&workload->shadow_bb))
		return;

	bb = list_first_entry(&workload->shadow_bb,
			struct intel_vgpu_shadow_bb, list);

	mutex_lock(&dev_priv->drm.struct_mutex);

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
				i915_gem_obj_finish_shmem_access(bb->obj);

			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

			if (bb->vma && !IS_ERR(bb->vma)) {
				i915_vma_unpin(bb->vma);
				i915_vma_close(bb->vma);
			}
			__i915_gem_object_release_unless_active(bb->obj);
		}
		list_del(&bb->list);
		kfree(bb);
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

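/*
 * Called with struct_mutex held: pin the shadow PPGTT, sync out-of-sync
 * guest pages, flush post-shadow entries, generate the i915 request, then
 * pin the shadow batch buffers and the workaround context before invoking
 * the submission-specific prepare hook.
 */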
static int prepare_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret = 0;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		return ret;
	}

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = intel_gvt_generate_request(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err_shadow_wa_ctx;
	}

	return 0;
err_shadow_wa_ctx:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_unpin_mm(workload->shadow_mm);
	return ret;
}

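/*
 * Scan, shadow and prepare a workload under struct_mutex, then submit the
 * backing i915 request. On a prepare failure the error is recorded in
 * workload->status, but an already-allocated request is still added so it
 * can be retired normally.
 */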
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	int ret = 0;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = prepare_workload(workload);
	if (ret) {
		engine->context_unpin(engine, shadow_ctx);
		goto out;
	}

out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
			ring_id, workload->req);
		i915_add_request(workload->req);
		workload->dispatched = true;
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have a current workload, maybe the workload dispatcher
	 * failed to submit it for some reason, resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload.
	 * once the current workload is set, schedule policy routines
	 * will wait until the current workload is finished when trying
	 * to schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

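/*
 * Write the shadow context back into the guest's context memory after a
 * workload completes, so the guest sees the ring header and register state
 * produced by the hardware.
 */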
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	unsigned int tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&s->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			intel_vgpu_destroy_workload(pos);
		}
		clear_bit(engine->id, s->shadow_ctx_desc_updated);
	}
}

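/*
 * Retire the current workload on a ring: wait for the final context
 * switch-out, derive the workload status from the request, write the
 * shadow context back to the guest, fire pending virtual events, unpin
 * the shadow context and, on error or during an engine reset, clean up
 * the remaining queued workloads.
 */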
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	int event;

	mutex_lock(&gvt->lock);

	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, complete the workload directly.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO as the workload status so that,
		 * when this request caused a GPU hang, we do not trigger a
		 * context switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, s->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);

	if (!workload->status) {
		release_shadow_batch_buffer(workload);
		release_shadow_wa_ctx(&workload->wa_ctx);
	}

	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* If workload->status is not successful, it means the HW GPU
		 * hit a hang or something went wrong with i915/GVT, and GVT
		 * won't inject a context switch interrupt to the guest. So
		 * this error is actually a vGPU hang from the guest's point
		 * of view, and we should emulate a vGPU hang accordingly. If
		 * there are pending workloads which were already submitted
		 * from the guest, we should clean them up like the HW GPU
		 * does.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up during the reset process later, so doing the
		 * workload clean up here doesn't have any impact.
		 **/
		clean_workloads(vgpu, ENGINE_MASK(ring_id));
	}

	workload->complete(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

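/*
 * One workload thread runs per engine. It sleeps until the scheduler picks
 * a workload for its ring, dispatches the workload with runtime PM (and
 * forcewake on SKL/KBL) held, waits for the request to finish and then
 * completes the workload.
 */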
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
		if (ret && (vgpu_is_vm_unhealthy(ret)))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
	i915_gem_context_put(s->shadow_ctx);
	kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 unsigned long engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	if (!s->active)
		return;

	clean_workloads(vgpu, engine_mask);
	s->ops->reset(vgpu, engine_mask);
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	int ret;

	s->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(s->shadow_ctx))
		return PTR_ERR(s->shadow_ctx);

	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
		s->shadow_ctx->priority = INT_MAX;

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
			sizeof(struct intel_vgpu_workload), 0,
			SLAB_HWCACHE_ALIGN,
			NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		INIT_LIST_HEAD(&s->workload_q_head[i]);

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	return 0;

out_shadow_ctx:
	i915_gem_context_put(s->shadow_ctx);
	return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: the engines the selected submission interface applies to
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when a guest configures its submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     unsigned long engine_mask,
				     unsigned int interface)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	const struct intel_vgpu_submission_ops *ops[] = {
		[INTEL_VGPU_EXECLIST_SUBMISSION] =
			&intel_vgpu_execlist_submission_ops,
	};
	int ret;

	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
		return -EINVAL;

	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
		return -EINVAL;

	if (s->active)
		s->ops->clean(vgpu, engine_mask);

	if (interface == 0) {
		s->ops = NULL;
		s->virtual_submission_interface = 0;
		s->active = false;
		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
		return 0;
	}

	ret = ops[interface]->init(vgpu, engine_mask);
	if (ret)
		return ret;

	s->ops = ops[interface];
	s->virtual_submission_interface = interface;
	s->active = true;

	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
			vgpu->id, s->ops->name);

	return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: the vGPU workload to be destroyed
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	if (workload->shadow_mm)
		intel_gvt_mm_unreference(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->shadowed = false;
	workload->vgpu = vgpu;

	return workload;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

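/*
 * Derive the guest PPGTT page table level from the context's addressing
 * mode, read the guest PDP root pointers from the ring context, then look
 * up or create the matching shadow mm for this workload.
 */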
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	int page_table_level;
	u32 pdp[8];

	if (desc->addressing_mode == 1) { /* legacy 32-bit */
		page_table_level = 3;
	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
		page_table_level = 4;
	} else {
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);

	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_vgpu_err("fail to create mm object.\n");
			return PTR_ERR(mm);
		}
	}
	workload->shadow_mm = mm;
	return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring id of the workload
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	workload = alloc_workload(vgpu);
	if (IS_ERR(workload))
		return workload;

	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;

	if (ring_id == RCS) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
	}

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(workload_q_head(vgpu, ring_id))) {
		intel_runtime_pm_get(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		ret = intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_put(dev_priv);
	}

	if (ret && (vgpu_is_vm_unhealthy(ret))) {
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}

/**
 * intel_vgpu_queue_workload - queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
	list_add_tail(&workload->list,
		workload_q_head(workload->vgpu, workload->ring_id));
	intel_gvt_kick_schedule(workload->vgpu->gvt);
	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}