/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

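/* The guest provides the PDP root pointers as eight 32-bit values
 * (low/high dword pairs). The execlist ring context stores them as MMIO
 * pairs starting at pdp3_UDW and going downward, hence the reversed
 * copy order below.
 */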
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

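/* Copy the guest ring context pages into the shadow context object and
 * fix up the register state (context control, timestamp, indirect
 * context pointers on RCS, and the PDP root pointers from the shadow
 * page table), so the workload executes against a host-controlled copy.
 */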
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
			workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}

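/* Requests submitted from a GVT shadow context are marked with
 * force-single-submission; use that flag to tell them apart from
 * requests issued by the host i915 itself.
 */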
static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}

static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;

	if (!is_gvt_request(req)) {
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_bh(&scheduler->mmio_context_lock);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_bh(&scheduler->mmio_context_lock);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

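/* Refresh the descriptor flags cached in lrc_desc from the context's
 * desc_template, which the caller has just updated to match the guest's
 * addressing mode.
 */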
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ctx->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}

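/* Reserve space in the i915 request's ring and copy the scanned guest
 * ring buffer contents into it, so the commands execute from the shadow
 * copy rather than guest memory.
 */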
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	void *shadow_ring_buffer_va;
	u32 *cs;

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

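/* Release the pinned mapping and the reference of the indirect context
 * object taken while shadowing the wa_ctx, if one was captured.
 */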
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

/**
 * intel_gvt_scan_and_shadow_workload - audit a workload by scanning it and
 * shadow it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure its content is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_ring *ring;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadowed)
		return 0;

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(shadow_ctx,
					dev_priv->engine[ring_id]);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_scan;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_scan;
	}

	/* Pin the shadow context by GVT even though it will also be pinned
	 * when i915 allocates the request. GVT updates the guest context
	 * from the shadow context when a workload completes, and by that
	 * time i915 may have already unpinned the shadow context, making
	 * the shadow_ctx pages invalid. So GVT needs its own pin. After
	 * updating the guest context, GVT can unpin the shadow_ctx safely.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		goto err_shadow;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto err_unpin;

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto err_unpin;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);
	ret = copy_workload_to_ring_buffer(workload);
	if (ret)
		goto err_unpin;
	workload->shadowed = true;
	return 0;

err_unpin:
	engine->context_unpin(engine, shadow_ctx);
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
	return ret;
}

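/* Run the workload's optional prepare callback before dispatch. */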
static int prepare_workload(struct intel_vgpu_workload *workload)
{
	int ret = 0;

	if (workload->prepare)
		ret = workload->prepare(workload);

	return ret;
}

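/* Shadow the workload, run its prepare step and submit the resulting
 * request to i915. On failure the error is recorded in workload->status
 * for complete_current_workload() to consume.
 */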
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ring_id = workload->ring_id;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	int ret = 0;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = prepare_workload(workload);
	if (ret) {
		engine->context_unpin(engine, shadow_ctx);
		goto out;
	}

out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
			ring_id, workload->req);
		i915_add_request(workload->req);
		workload->dispatched = true;
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have a current workload - perhaps the workload dispatcher
	 * failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload.
	 * once the current workload is set, the schedule policy routines
	 * will wait until it is finished when trying to schedule out a
	 * vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

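/* Mirror of populate_shadow_context(): write the shadow context pages
 * and ring state back into the guest context after the workload has
 * completed.
 */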
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

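/* Finalize the current workload on a ring: wait for the shadow context
 * to schedule out, propagate the request status, write results back to
 * the guest, and wake up anyone waiting for completion.
 */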
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	int event;

	mutex_lock(&gvt->lock);

	/* For a workload with a request, wait for the context switch to
	 * make sure the request is completed.
	 * For a workload without a request, complete it directly.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Propagate -EIO into the workload status
		 * so that a hanging request does not trigger a context
		 * switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, s->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

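/* Per-ring kernel thread: picks workloads off the queue of the
 * currently scheduled vGPU, dispatches them to i915, and waits for
 * completion before completing them back to the guest.
 */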
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
		if (ret && (vgpu_is_vm_unhealthy(ret))) {
			mutex_lock(&gvt->lock);
			intel_vgpu_clean_execlist(vgpu);
			mutex_unlock(&gvt->lock);
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		}

	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

/**
 * intel_vgpu_clean_submission - free submission-related resources of a vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	i915_gem_context_put(s->shadow_ctx);
	kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_setup_submission - set up submission-related resources for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	int ret;

	s->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(s->shadow_ctx))
		return PTR_ERR(s->shadow_ctx);

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
			sizeof(struct intel_vgpu_workload), 0,
			SLAB_HWCACHE_ALIGN,
			NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		INIT_LIST_HEAD(&s->workload_q_head[i]);

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	return 0;

out_shadow_ctx:
	i915_gem_context_put(s->shadow_ctx);
	return ret;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: the workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	if (workload->shadow_mm)
		intel_gvt_mm_unreference(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * A pointer to the new struct intel_vgpu_workload on success, or an
 * ERR_PTR-encoded negative error code on failure.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->shadowed = false;
	workload->vgpu = vgpu;

	return workload;
}