/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

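/*
 * Write the four shadow PPGTT page-directory pointers (eight UDW/LDW
 * dwords) into the ring context image. pdp3_UDW is the first of the
 * eight MMIO pairs in the context layout while pdp[] arrives ordered
 * low-to-high, hence the reversed copy.
 */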
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

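/*
 * Build the shadow context from the guest's logical ring context: the
 * plain context pages are copied wholesale from guest memory (after
 * translating the guest lrca through the vGPU GGTT), while the ring
 * state page is copied register by register so the PDP root pointers
 * can be redirected to the shadow page table.
 */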
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = intel_lr_context_size(
			gvt->dev_priv->engine[ring_id]);

	context_page_num = context_page_num >> PAGE_SHIFT;

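	/*
	 * Note: the Broadwell RCS context is hard-coded to 19 pages here,
	 * overriding intel_lr_context_size(); this is assumed to be a
	 * quirk of the gen8 render context layout.
	 */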
	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

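	/*
	 * Copy from the third page onwards: the first two pages relative
	 * to the lrca (presumably the PPHWSP and the ring state page)
	 * are not copied wholesale; the ring state page is populated
	 * field by field below.
	 */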
	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_err("Invalid guest context descriptor\n");
			return -EINVAL;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
			workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}

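/*
 * Notifier called by i915 when the shadow context is scheduled in or
 * out on the hardware: it loads the vGPU's render MMIO state on the way
 * in and restores the host's on the way out, and flags the shadow
 * context active/inactive so complete_current_workload() can wait for
 * it to leave the hardware.
 */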
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
			struct intel_vgpu, shadow_ctx_notifier_block);
	struct drm_i915_gem_request *req =
		(struct drm_i915_gem_request *)data;
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[req->engine->id];

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		intel_gvt_load_render_mmio(workload->vgpu,
					   workload->ring_id);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		intel_gvt_restore_render_mmio(workload->vgpu,
					      workload->ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

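/*
 * Dispatch a workload to i915: allocate a request on the target engine,
 * scan and shadow the guest ring buffer and wa_ctx, populate the shadow
 * context, then submit the request. On failure after the request has
 * been allocated, the error is recorded in workload->status and the
 * request is still added, so completion follows the normal path.
 */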
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct drm_i915_gem_request *rq;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	mutex_lock(&dev_priv->drm.struct_mutex);

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_err("fail to allocate gem request\n");
		workload->status = PTR_ERR(rq);
		/* no request was allocated; drop struct_mutex before bailing */
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return workload->status;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
	if (ret)
		goto out;

	ret = populate_shadow_context(workload);
	if (ret)
		goto out;

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto out;
	}

	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
		ring_id, workload->req);

	ret = 0;
	workload->dispatched = true;
out:
	if (ret)
		workload->status = ret;

	i915_add_request_no_flush(rq);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

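/*
 * Pick the workload the thread for @ring_id should run next: NULL when
 * there is no current vgpu, a reschedule is pending, or the queue is
 * empty; otherwise the (possibly still current, i.e. resubmitted)
 * workload at the head of the current vgpu's queue.
 */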
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * bail out early when there is no current vgpu, the current
	 * vgpu is about to be scheduled out, or it has no workload
	 * queued.
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
		gvt_dbg_sched("ring id %d stop - no available workload\n",
				ring_id);
		goto out;
	}

	/*
	 * if there is still a current workload, the workload dispatcher
	 * may have failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload. once the current
	 * workload is set, the scheduling policy routines will wait for
	 * it to finish before trying to schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

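/*
 * Mirror of populate_shadow_context(): copy the shadow context back
 * into the guest's logical ring context after the workload completes,
 * including the new ring header, so the guest sees the results.
 */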
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = intel_lr_context_size(
			gvt->dev_priv->engine[ring_id]);

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

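/*
 * Retire the current workload on @ring_id: unless the workload failed
 * or the vGPU is resetting, wait for the shadow context to be scheduled
 * out, write the results back to the guest and fire any pending virtual
 * events; then hand the workload back through its complete() callback.
 */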
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];

	if (!workload->status && !workload->vgpu->resetting) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		update_guest_context(workload);

		for_each_set_bit(event, workload->pending_events,
				 INTEL_GVT_EVENT_MAX)
			intel_vgpu_trigger_virtual_event(workload->vgpu,
					event);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	atomic_dec(&workload->vgpu->running_workload_num);

	list_del_init(&workload->list);
	workload->complete(workload);

	wake_up(&scheduler->workload_complete_wq);
	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);

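/*
 * Per-engine kernel thread: sleeps until a workload is queued, then
 * dispatches it under gvt->lock (with runtime PM and, on Skylake,
 * forcewake held) and waits for the request to complete before retiring
 * it. scheduler_mutex serialises the whole dispatch/complete cycle
 * across engines.
 */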
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	long lret;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		mutex_lock(&scheduler_mutex);

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			gvt_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);

		lret = i915_wait_request(workload->req,
					 0, MAX_SCHEDULE_TIMEOUT);
		if (lret < 0) {
			workload->status = lret;
			gvt_err("fail to wait workload, skip\n");
		} else {
			workload->status = 0;
		}

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);

		mutex_unlock(&scheduler_mutex);

	}
	return 0;
}

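/*
 * Block until every workload previously queued by @vgpu has been
 * retired, i.e. running_workload_num drops to zero.
 */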
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&vgpu->running_workload_num));
	}
}

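/*
 * Stop and release the per-engine workload threads.
 */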
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	int i;

	gvt_dbg_core("clean workload scheduler\n");

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (scheduler->thread[i]) {
			kthread_stop(scheduler->thread[i]);
			scheduler->thread[i] = NULL;
		}
	}
}

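/*
 * Create one workload thread per engine present on the device; on any
 * failure the already-created threads are torn down again.
 */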
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	int ret;
	int i;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		/* check ring mask at init time */
		if (!HAS_ENGINE(gvt->dev_priv, i))
			continue;

		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

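/*
 * Unregister the status-change notifier and drop the vGPU's shadow
 * context.
 */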
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
			&vgpu->shadow_ctx_notifier_block);

	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}

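/*
 * Create the i915 GEM context that backs this vGPU's shadow execution
 * and hook the status-change notifier into it.
 */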
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_set(&vgpu->running_workload_num, 0);

	vgpu->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(vgpu->shadow_ctx))
		return PTR_ERR(vgpu->shadow_ctx);

	vgpu->shadow_ctx->engine[RCS].initialised = true;

	vgpu->shadow_ctx_notifier_block.notifier_call =
		shadow_context_status_change;

	atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
				       &vgpu->shadow_ctx_notifier_block);
	return 0;
}