blob: 044125c0f407e8a9a4029009f839c5dc88ae1279 [file] [log] [blame]
Zhi Wang82d375d2016-07-05 12:40:49 -04001/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Eddie Dong <eddie.dong@intel.com>
25 * Kevin Tian <kevin.tian@intel.com>
26 *
27 * Contributors:
28 * Ping Gao <ping.a.gao@intel.com>
29 * Zhi Wang <zhi.a.wang@intel.com>
30 * Bing Niu <bing.niu@intel.com>
31 *
32 */
33
34#include "i915_drv.h"
Zhenyu Wangfeddf6e2016-10-20 17:15:03 +080035#include "gvt.h"
36#include "i915_pvinfo.h"
Zhi Wang82d375d2016-07-05 12:40:49 -040037
38static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
39{
40 vfree(vgpu->mmio.vreg);
41 vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
42}
43
Ping Gao23736d12016-10-26 09:38:52 +080044int setup_vgpu_mmio(struct intel_vgpu *vgpu)
Zhi Wang82d375d2016-07-05 12:40:49 -040045{
46 struct intel_gvt *gvt = vgpu->gvt;
47 const struct intel_gvt_device_info *info = &gvt->device_info;
48
Du, Changbinf4b0c282016-11-11 10:31:37 +080049 if (vgpu->mmio.vreg)
50 memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
51 else {
52 vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
53 if (!vgpu->mmio.vreg)
54 return -ENOMEM;
55 }
Zhi Wang82d375d2016-07-05 12:40:49 -040056
57 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
58
59 memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
60 memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
Zhi Wange39c5ad2016-09-02 13:33:29 +080061
62 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
63
64 /* set the bit 0:2(Core C-State ) to C0 */
65 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
Zhi Wang82d375d2016-07-05 12:40:49 -040066 return 0;
67}
68
69static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
70 struct intel_vgpu_creation_params *param)
71{
72 struct intel_gvt *gvt = vgpu->gvt;
73 const struct intel_gvt_device_info *info = &gvt->device_info;
74 u16 *gmch_ctl;
75 int i;
76
77 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
78 info->cfg_space_size);
79
80 if (!param->primary) {
81 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
82 INTEL_GVT_PCI_CLASS_VGA_OTHER;
83 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
84 INTEL_GVT_PCI_CLASS_VGA_OTHER;
85 }
86
87 /* Show guest that there isn't any stolen memory.*/
88 gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
89 *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
90
91 intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
92 gvt_aperture_pa_base(gvt), true);
93
94 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
95 | PCI_COMMAND_MEMORY
96 | PCI_COMMAND_MASTER);
97 /*
98 * Clear the bar upper 32bit and let guest to assign the new value
99 */
100 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
101 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
102
103 for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
104 vgpu->cfg_space.bar[i].size = pci_resource_len(
105 gvt->dev_priv->drm.pdev, i * 2);
106 vgpu->cfg_space.bar[i].tracked = false;
107 }
108}
109
/*
 * Fill the PVINFO page in the vGPU virtual MMIO space so a paravirtual
 * guest driver can discover its ballooned graphics memory ranges and
 * fence allocation.  Reads nothing back; only writes vregs.
 */
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
	/* display_ready starts at 0; presumably flipped later — TODO confirm */
	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
	/* Mappable (aperture) GM range assigned to this vGPU */
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	/* Non-mappable (hidden) GM range assigned to this vGPU */
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	/* The guest-visible interface struct must exactly fill the page. */
	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
138
139/**
Zhenyu Wang1f31c822016-11-03 18:38:31 +0800140 * intel_gvt_init_vgpu_types - initialize vGPU type list
141 * @gvt : GVT device
142 *
143 * Initialize vGPU type list based on available resource.
144 *
145 */
146int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
147{
148 unsigned int num_types;
149 unsigned int i, low_avail;
150 unsigned int min_low;
151
152 /* vGPU type name is defined as GVTg_Vx_y which contains
153 * physical GPU generation type and 'y' means maximum vGPU
154 * instances user can create on one physical GPU for this
155 * type.
156 *
157 * Depend on physical SKU resource, might see vGPU types like
158 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
159 * different types of vGPU on same physical GPU depending on
160 * available resource. Each vGPU type will have "avail_instance"
161 * to indicate how many vGPU instance can be created for this
162 * type.
163 *
164 * Currently use static size here as we init type earlier..
165 */
166 low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
167 num_types = 4;
168
169 gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
170 GFP_KERNEL);
171 if (!gvt->types)
172 return -ENOMEM;
173
174 min_low = MB_TO_BYTES(32);
175 for (i = 0; i < num_types; ++i) {
176 if (low_avail / min_low == 0)
177 break;
178 gvt->types[i].low_gm_size = min_low;
179 gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
180 gvt->types[i].fence = 4;
181 gvt->types[i].max_instance = low_avail / min_low;
182 gvt->types[i].avail_instance = gvt->types[i].max_instance;
183
184 if (IS_GEN8(gvt->dev_priv))
185 sprintf(gvt->types[i].name, "GVTg_V4_%u",
186 gvt->types[i].max_instance);
187 else if (IS_GEN9(gvt->dev_priv))
188 sprintf(gvt->types[i].name, "GVTg_V5_%u",
189 gvt->types[i].max_instance);
190
191 min_low <<= 1;
192 gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
193 i, gvt->types[i].name, gvt->types[i].max_instance,
194 gvt->types[i].avail_instance,
195 gvt->types[i].low_gm_size,
196 gvt->types[i].high_gm_size, gvt->types[i].fence);
197 }
198
199 gvt->num_types = i;
200 return 0;
201}
202
/* Free the vGPU type array allocated by intel_gvt_init_vgpu_types(). */
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}
207
208static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
209{
210 int i;
211 unsigned int low_gm_avail, high_gm_avail, fence_avail;
212 unsigned int low_gm_min, high_gm_min, fence_min, total_min;
213
214 /* Need to depend on maxium hw resource size but keep on
215 * static config for now.
216 */
217 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
218 gvt->gm.vgpu_allocated_low_gm_size;
219 high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
220 gvt->gm.vgpu_allocated_high_gm_size;
221 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
222 gvt->fence.vgpu_allocated_fence_num;
223
224 for (i = 0; i < gvt->num_types; i++) {
225 low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
226 high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
227 fence_min = fence_avail / gvt->types[i].fence;
228 total_min = min(min(low_gm_min, high_gm_min), fence_min);
229 gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
230 total_min);
231
232 gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
233 i, gvt->types[i].name, gvt->types[i].max_instance,
234 gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
235 gvt->types[i].high_gm_size, gvt->types[i].fence);
236 }
237}
238
239/**
Zhi Wang82d375d2016-07-05 12:40:49 -0400240 * intel_gvt_destroy_vgpu - destroy a virtual GPU
241 * @vgpu: virtual GPU
242 *
243 * This function is called when user wants to destroy a virtual GPU.
244 *
245 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	/* Mark inactive and unpublish the ID so no new work targets us. */
	vgpu->active = false;
	idr_remove(&gvt->vgpu_idr, vgpu->id);

	/*
	 * Drain in-flight workloads.  gvt->lock is dropped while waiting
	 * so the scheduler can make progress, then retaken for teardown.
	 */
	if (atomic_read(&vgpu->running_workload_num)) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	/* Tear down in the reverse order of __intel_gvt_create_vgpu(). */
	intel_vgpu_stop_schedule(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_gvt_context(vgpu);
	intel_vgpu_clean_execlist(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	clean_vgpu_mmio(vgpu);
	vfree(vgpu);

	/* Freed resources change how many instances each type can host. */
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);
}
276
/*
 * Allocate and fully initialize one vGPU instance from the creation
 * parameters.  On any failure the goto chain below unwinds exactly the
 * steps completed so far, in reverse order.  Returns the new vGPU or
 * ERR_PTR on failure.  Holds gvt->lock for the whole setup.
 */
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	/* IDs start at 1; 0 is reserved (idr_alloc returns the new ID). */
	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	setup_vgpu_cfg_space(vgpu, param);

	ret = setup_vgpu_mmio(vgpu);
	if (ret)
		goto out_free_vgpu;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	/* Needs MMIO and GM resources set up first. */
	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_display(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_execlist(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_gvt_context(vgpu);
	if (ret)
		goto out_clean_execlist;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_shadow_ctx;

	/* Only now is the vGPU visible as schedulable. */
	vgpu->active = true;
	mutex_unlock(&gvt->lock);

	return vgpu;

	/* Error unwind: each label undoes the step above its goto. */
out_clean_shadow_ctx:
	intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
	intel_vgpu_clean_execlist(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	clean_vgpu_mmio(vgpu);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}
Zhenyu Wang1f31c822016-11-03 18:38:31 +0800362
363/**
364 * intel_gvt_create_vgpu - create a virtual GPU
365 * @gvt: GVT device
366 * @type: type of the vGPU to create
367 *
368 * This function is called when user wants to create a virtual GPU.
369 *
370 * Returns:
371 * pointer to intel_vgpu, error pointer if failed.
372 */
373struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
374 struct intel_vgpu_type *type)
375{
376 struct intel_vgpu_creation_params param;
377 struct intel_vgpu *vgpu;
378
379 param.handle = 0;
380 param.low_gm_sz = type->low_gm_size;
381 param.high_gm_sz = type->high_gm_size;
382 param.fence_sz = type->fence;
383
384 /* XXX current param based on MB */
385 param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
386 param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
387
388 vgpu = __intel_gvt_create_vgpu(gvt, &param);
389 if (IS_ERR(vgpu))
390 return vgpu;
391
392 /* calculate left instance change for types */
393 intel_gvt_update_vgpu_types(gvt);
394
395 return vgpu;
396}
Jike Song9ec1e662016-11-03 18:38:35 +0800397
398/**
399 * intel_gvt_reset_vgpu - reset a virtual GPU
400 * @vgpu: virtual GPU
401 *
402 * This function is called when user wants to reset a virtual GPU.
403 *
404 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	/* TODO: vGPU reset emulation is not implemented yet — this is a
	 * deliberate empty stub; callers get a no-op.
	 */
}
407}