/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

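/* Byte offset of a field within the execlist ring context image,
 * e.g. RING_CTX_OFF(ctx_timestamp.val).
 */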
#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

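/* Write the PPGTT root pointers into a ring context image. The image
 * stores the pairs from pdp3 down to pdp0, so the caller-supplied
 * pdp[] array is copied in reverse order.
 */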
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

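/* Populate the shadow context from the guest context: copy the
 * per-engine context pages after the PPHWSP wholesale from guest
 * memory, copy selected ring-context registers individually, and
 * replace the PDP root pointers with the shadow page table so the
 * workload runs on shadow PPGTT rather than guest PPGTT.
 */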
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = intel_lr_context_size(
			gvt->dev_priv->engine[ring_id]);

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return -EINVAL;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
			workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}

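/* Called from i915's context status notifier chain when the shadow
 * context is scheduled in or out. Render MMIO state is switched on
 * schedule-in and restored on schedule-out, and shadow_ctx_active
 * tracks whether the shadow context is currently on the hardware.
 */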
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[req->engine->id];

	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		intel_gvt_load_render_mmio(workload->vgpu,
					   workload->ring_id);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		intel_gvt_restore_render_mmio(workload->vgpu,
					      workload->ring_id);
		/* A status of -EINPROGRESS means the workload hit no
		 * issue during dispatch, so clear it to zero for good
		 * on SCHEDULE_OUT. Any other status means something
		 * went wrong during dispatch and must be preserved.
		 */
		if (workload->status == -EINPROGRESS)
			workload->status = 0;
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

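/* Dispatch one vGPU workload to i915: pin the shadow context,
 * allocate a request against it, scan and shadow the guest ring
 * buffer (plus the indirect wa context on RCS), populate the shadow
 * context, then submit. The request is added even on failure, since
 * an allocated request must not be abandoned; otherwise the extra
 * context pin is dropped here.
 */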
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				     GEN8_CTX_ADDRESSING_MODE_SHIFT;

	mutex_lock(&dev_priv->drm.struct_mutex);

	/* Pin the shadow context by GVT even though it will also be
	 * pinned when i915 allocates the request. GVT updates the
	 * guest context from the shadow context once the workload
	 * completes, and by then i915 may already have unpinned the
	 * shadow context, leaving its pages invalid. So GVT must hold
	 * its own pin and release it only after the guest context has
	 * been updated.
	 */
	ret = engine->context_pin(engine, shadow_ctx);
	if (ret) {
		gvt_vgpu_err("fail to pin shadow context\n");
		workload->status = ret;
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return ret;
	}

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto out;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto out;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto out;

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto out;
	}

	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
		ring_id, workload->req);

	ret = 0;
	workload->dispatched = true;
out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(rq))
		i915_add_request(rq);
	else
		engine->context_unpin(engine, shadow_ctx);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

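/* Pick the next workload for a ring under gvt->lock. Returns NULL if
 * there is no current vgpu, a reschedule is pending, or the queue is
 * empty; returns the unfinished current workload again so that a
 * failed dispatch can be retried.
 */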
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have a current workload; the workload dispatcher may
	 * have failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload.
	 * once the current workload is set, the schedule policy routines
	 * will wait until it is finished when trying to schedule out
	 * a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

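/* Mirror of populate_shadow_context: after a workload completes,
 * write the shadow context pages and selected ring-context registers
 * back to guest memory so the guest sees the updated context.
 */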
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = intel_lr_context_size(
			gvt->dev_priv->engine[ring_id]);

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

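/* Retire the current workload on a ring: wait until the shadow
 * context has been scheduled out, write results back to the guest
 * (skipped on failure or while the vgpu is resetting), trigger any
 * pending virtual events, drop the extra shadow context pin and call
 * the workload's completion callback.
 */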
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	struct intel_vgpu *vgpu;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];
	vgpu = workload->vgpu;

	/* For a workload with a request, wait for the context switch
	 * to make sure the request is completed.
	 * For a workload without a request, complete it directly.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !vgpu->resetting) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);
	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);

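/* Per-ring scheduler thread: sleep until a workload is queued,
 * dispatch it, wait for its request to complete and then retire it.
 * Forcewake is held across dispatch on SKL/KBL, and runtime PM keeps
 * the device awake while a workload is in flight.
 */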
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		mutex_lock(&scheduler_mutex);

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);

		mutex_unlock(&scheduler_mutex);
	}
	return 0;
}

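/* Block until all in-flight workloads of a vGPU have completed,
 * e.g. before the vGPU is reset or scheduled out.
 */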
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&vgpu->running_workload_num));
	}
}

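/* Unregister the per-engine context status notifiers and stop the
 * workload threads created at init time.
 */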
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

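/* Create one workload thread per engine and register the shadow
 * context status notifier on each engine; on failure, unwind
 * everything set up so far.
 */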
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

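/* Release the per-vGPU shadow context. */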
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}

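/* Create the per-vGPU shadow context used to run guest workloads.
 * RCS is marked initialised, presumably because GVT populates the
 * context image from the guest instead of relying on i915's default
 * context initialisation.
 */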
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_set(&vgpu->running_workload_num, 0);

	vgpu->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(vgpu->shadow_ctx))
		return PTR_ERR(vgpu->shadow_ctx);

	vgpu->shadow_ctx->engine[RCS].initialised = true;

	return 0;
}