/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

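/*
 * Write the PDP root pointer values into the pdp3_UDW..pdp0_LDW register
 * pairs of the ring context image, in reverse order of the pdp[] array.
 */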
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

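/*
 * Copy the guest ring context from guest memory into the shadow context
 * object: the per-page context image, selected ring context registers and
 * the PDP root pointers of the shadow page table.
 */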
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EINVAL;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
				     workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}

static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}

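/*
 * Notifier callback invoked by i915 on context schedule-in/out. It switches
 * the engine MMIO context between host and vGPU as ownership changes and
 * tracks whether the shadow context is currently active on the hardware.
 */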
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;

	if (!is_gvt_request(req)) {
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_bh(&scheduler->mmio_context_lock);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_bh(&scheduler->mmio_context_lock);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

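/*
 * Propagate the flag bits (0-11) cached in desc_template, such as the
 * addressing mode set by the caller, into the engine's cached lrc_desc.
 */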
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ctx->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
 * shadow it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct drm_i915_gem_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadowed)
		return 0;

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(shadow_ctx,
					dev_priv->engine[ring_id]);

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto out;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto out;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto out;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto out;

	workload->shadowed = true;

out:
	return ret;
}

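/*
 * Shadow the workload, run its prepare() callback, pin the shadow context
 * and hand the request over to i915 for execution.
 */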
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_ring *ring;
	int ret = 0;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto out;
	}

	/* Pin the shadow context by gvt even though the shadow context will
	 * be pinned when i915 allocates the request. That is because gvt
	 * will update the guest context from the shadow context when the
	 * workload is completed, and at that moment i915 may already have
	 * unpinned the shadow context, making the shadow_ctx pages invalid.
	 * So gvt needs to pin it itself. After updating the guest context,
	 * gvt can unpin the shadow_ctx safely.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		goto out;
	}

out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
			ring_id, workload->req);
		i915_add_request(workload->req);
		workload->dispatched = true;
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

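/*
 * Select the workload the ring thread should run next: resubmit the current
 * one if it is still pending, otherwise take the head of the current vGPU's
 * workload queue. Returns NULL when nothing can be dispatched.
 */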
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have a current workload, maybe the workload dispatcher
	 * failed to submit it for some reason, resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload.
	 * once the current workload is set, schedule policy routines
	 * will wait until the current workload is finished when trying
	 * to schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

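/*
 * Propagate the shadow context back to the guest after a workload completes:
 * copy the context pages, update the guest ring-head field with the executed
 * tail and write back the shadowed context registers.
 */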
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

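/*
 * Finish the current workload on a ring: wait for the shadow context to be
 * switched out, copy results back to the guest, deliver pending events and
 * hand the workload back to its owner via the complete() callback.
 */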
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	struct intel_vgpu *vgpu;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];
	vgpu = workload->vgpu;

	/* For a workload with a request, wait for the context switch to make
	 * sure the request is completed.
	 * For a workload without a request, complete the workload directly.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Propagate -EIO to the workload status so
		 * that a request which hung the GPU does not trigger a
		 * context switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !vgpu->resetting) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

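/*
 * Per-ring kernel thread: waits for workloads, dispatches them to i915 and
 * completes them once the hardware is done.
 */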
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&vgpu->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

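/*
 * Create one workload thread per engine and register the shadow context
 * status notifier for each of them.
 */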
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
	i915_gem_context_put(vgpu->shadow_ctx);
}

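/*
 * Create the per-vGPU shadow context used to submit guest workloads
 * through i915.
 */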
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_set(&vgpu->running_workload_num, 0);

	vgpu->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(vgpu->shadow_ctx))
		return PTR_ERR(vgpu->shadow_ctx);

	vgpu->shadow_ctx->engine[RCS].initialised = true;

	bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	return 0;
}