/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

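	/* struct vgt_if is the layout the guest reads from the PVINFO page,
	 * so it must exactly fill VGT_PVINFO_SIZE.
	 */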
	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail;
	unsigned int min_low;

	/* A vGPU type name has the form GVTg_Vx_y, where 'x' encodes the
	 * physical GPU generation and 'y' is the maximum number of vGPU
	 * instances of this type that can be created on one physical GPU.
	 *
	 * Depending on the physical SKU resources, types such as
	 * GVTg_V4_8, GVTg_V4_4 or GVTg_V4_2 may be exposed. Different vGPU
	 * types can coexist on the same physical GPU, subject to the
	 * available resources. Each type carries an "avail_instance" count
	 * that indicates how many more vGPU instances of that type can
	 * still be created.
	 *
	 * A static size is used here for now because the types are
	 * initialized early.
	 */
	low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
	num_types = 4;

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
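	/* Types start at 32MB of low GM and double each step (32/64/128/256MB),
	 * so larger types allow fewer concurrent instances.
	 */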
	for (i = 0; i < num_types; ++i) {
		if (low_avail / min_low == 0)
			break;
		gvt->types[i].low_gm_size = min_low;
		gvt->types[i].high_gm_size = max((min_low << 3), MB_TO_BYTES(384U));
		gvt->types[i].fence = 4;
		gvt->types[i].max_instance = low_avail / min_low;
		gvt->types[i].avail_instance = gvt->types[i].max_instance;

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%u",
				gvt->types[i].max_instance);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%u",
				gvt->types[i].max_instance);

		min_low <<= 1;
		gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name, gvt->types[i].max_instance,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}

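	/* i may be smaller than num_types if low GM was exhausted before all
	 * types could be populated.
	 */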
	gvt->num_types = i;
	return 0;
}

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min, total_min;

	/* This should be derived from the maximum hardware resource sizes,
	 * but a static configuration is kept for now.
	 */
	low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

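	/* For each type, the remaining instance count is limited by whichever
	 * of low GM, high GM or fence registers is scarcest.
	 */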
	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		total_min = min(min(low_gm_min, high_gm_min), fence_min);
		gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
						   total_min);

		gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name, gvt->types[i].max_instance,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	vgpu->active = false;
	idr_remove(&gvt->vgpu_idr, vgpu->id);

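	/* If workloads are still in flight, drop gvt->lock so the scheduler can
	 * retire them, and wait for the vGPU to go idle before tearing it down.
	 */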
	if (atomic_read(&vgpu->running_workload_num)) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

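	/* Tear down the vGPU sub-components, roughly in the reverse order of
	 * their setup in __intel_gvt_create_vgpu().
	 */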
	intel_vgpu_stop_schedule(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_gvt_context(vgpu);
	intel_vgpu_clean_execlist(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
		     param->handle, param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

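	/* Allocate a vGPU id in the range [1, GVT_MAX_VGPU); id 0 is never
	 * handed out to a vGPU.
	 */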
	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_display(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_execlist(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_gvt_context(vgpu);
	if (ret)
		goto out_clean_execlist;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_shadow_ctx;

	vgpu->active = true;
	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_shadow_ctx:
	intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
	intel_vgpu_clean_execlist(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when a user wants to create a virtual GPU.
 *
 * Returns:
 * Pointer to intel_vgpu on success, error pointer on failure.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;

	/* XXX the creation params are currently expressed in MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (IS_ERR(vgpu))
		return vgpu;

	/* recalculate the remaining available instances for each type */
	intel_gvt_update_vgpu_types(gvt);

	return vgpu;
}

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: true for a vGPU Device Model Level Reset, false for a GT reset
 * @engine_mask: engines to reset for a GT reset
 *
 * This function is called when a user wants to reset a virtual GPU through
 * a device model reset or a GT reset. The caller must hold the gvt lock.
 *
 * A vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset and
 * returns the whole vGPU to the default state it had when created. This is
 * required for both functional and security reasons. The ultimate goal of a
 * vGPU FLR is to allow a vGPU instance to be reused by virtual machines:
 * before a vGPU is assigned to a virtual machine, such a reset must be
 * issued first.
 *
 * Full GT Reset and Per-Engine GT Reset are the soft reset flows for the GPU
 * engines (Render, Blitter, Video, Video Enhancement) defined by the GPU
 * specification. Unlike an FLR, a GT reset only resets the particular
 * resources of a vGPU named in the reset request. A guest driver can issue a
 * GT reset by programming the virtual GDRST register to reset a specific
 * virtual GPU engine or all engines.
 *
 * The parameter @dmlr selects between a DMLR and a GT reset. The parameter
 * @engine_mask specifies the engines to be reset. If ALL_ENGINES is given
 * for @engine_mask, the caller requests a full GT reset and all virtual GPU
 * engines are reset. For an FLR, @engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);
	vgpu->resetting = true;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * current_vgpu will be set to NULL after stopping the scheduler
	 * when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

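	/* A DMLR resets the execlists of all engines; a GT reset only touches
	 * the engines named in engine_mask.
	 */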
	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);

	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_reset_gtt(vgpu, dmlr);
		intel_vgpu_reset_resource(vgpu);
		intel_vgpu_reset_mmio(vgpu);
		populate_pvinfo_page(vgpu);

		if (dmlr)
			intel_vgpu_reset_cfg_space(vgpu);
	}

	vgpu->resetting = false;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);
}