/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

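/*
 * Out-of-sync (OOS) support knobs: when enabled, a guest PTE page that is
 * written repeatedly can be taken out of write protection and re-synced in
 * batch before the next workload submission, instead of trapping every write.
 */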
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - type of next level page table
 * - type of entry inside this level page table
 * - type of entry with PSE set
 *
 * If the given type doesn't carry that kind of information, e.g. an l4 root
 * entry type is asked for its PSE type, or a PTE page table type is asked
 * for its next level page table type, GTT_TYPE_INVALID is returned, since
 * an l4 root entry has no PSE bit and a PTE page table has no next level
 * page table. This is useful when traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

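/* Helpers for raw host GGTT PTE access through the GSM mapping. */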
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void gtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> 12);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> 12);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> 12);
	}

	e->val64 |= (pfn << 12);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & BIT(7)))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit set,
	 * which still works, so root pointer entries need to be treated
	 * specially here.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & BIT(0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~BIT(0);
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= BIT(0);
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

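/*
 * Translate a guest entry into a machine entry: the entry value is copied
 * and its guest page frame number is replaced with the machine frame number
 * provided by the hypervisor.
 */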
static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
		struct intel_gvt_gtt_entry *m)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long gfn, mfn;

	*m = *p;

	if (!ops->test_present(p))
		return 0;

	gfn = ops->get_pfn(p);

	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
		return -ENXIO;
	}

	ops->set_pfn(m, mfn);
	return 0;
}

/*
 * MM helpers.
 */
int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = mm->page_table_entry_type;

	ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);
	return 0;
}

int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

/**
 * intel_vgpu_init_page_track - init a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 * @gfn: guest memory page frame number
 * @handler: the function to be called when the target guest memory page has
 * been modified.
 *
 * This function is called when a user wants to prepare a page track data
 * structure to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	INIT_HLIST_NODE(&t->node);

	t->tracked = false;
	t->gfn = gfn;
	t->handler = handler;
	t->data = data;

	hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
	return 0;
}

/**
 * intel_vgpu_clean_page_track - release a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 *
 * This function is called before a user frees a page track data structure.
 */
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t)
{
	if (!hlist_unhashed(&t->node))
		hash_del(&t->node);

	if (t->tracked)
		intel_gvt_hypervisor_disable_page_track(vgpu, t);
}

/**
 * intel_vgpu_find_tracked_page - find a tracked guest page
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation layer wants to figure out if a
 * trapped GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to page track data structure, NULL if not found.
 */
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *t;

	hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
			t, node, gfn) {
		if (t->gfn == gfn)
			return t;
	}
	return NULL;
}

static int init_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	p->oos_page = NULL;
	p->write_cnt = 0;

	return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void clean_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p)
{
	if (p->oos_page)
		detach_oos_page(vgpu, p->oos_page);

	intel_vgpu_clean_page_track(vgpu, &p->track);
}

static inline int init_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p, int type, bool hash)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		return -EINVAL;
	}

	p->vaddr = page_address(p->page);
	p->type = type;

	INIT_HLIST_NODE(&p->node);

	p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
	if (hash)
		hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
	return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
			PCI_DMA_BIDIRECTIONAL);

	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p;

	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
			p, node, mfn) {
		if (p->mfn == mfn)
			return p;
	}
	return NULL;
}

#define page_track_to_guest_page(ptr) \
	container_of(ptr, struct intel_vgpu_guest_page, track)

#define guest_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

	clean_shadow_page(spt->vgpu, &spt->shadow_page);
	clean_guest_page(spt->vgpu, &spt->guest_page);
	list_del_init(&spt->post_shadow_list);

	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_shadow_page *sp;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_guest_page *gpt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *data, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_page_track *t = data;
	struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!t->tracked)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(p,
		pa, p_data, bytes);
	if (ret)
		return ret;
	return ret;
}

static int reclaim_one_mm(struct intel_gvt *gvt);

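/*
 * Allocate a shadow page table page plus the tracking data for the guest
 * page it shadows; if memory is tight, try to reclaim an mm first and retry.
 */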
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	spt->guest_page_type = type;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * TODO: guest page type may be different with shadow page type,
	 *	 when we support PSE page in future.
	 */
	ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
	if (ret) {
		gvt_vgpu_err("fail to initialize shadow page for spt\n");
		goto err;
	}

	ret = init_guest_page(vgpu, &spt->guest_page,
			gfn, ppgtt_write_protection_handler, NULL);
	if (ret) {
		gvt_vgpu_err("fail to initialize guest page for spt\n");
		goto err;
	}

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;
err:
	ppgtt_free_shadow_page(spt);
	return ERR_PTR(ret);
}

static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

	if (p)
		return shadow_page_to_ppgtt_spt(p);

	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
	return NULL;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
		return -EINVAL;

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_shadow_page(s);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	if (gtt_type_is_pte_pt(spt->shadow_page.type))
		goto release;

	for_each_present_shadow_entry(spt, &e, index) {
		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
			return -EINVAL;
		}
		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
				spt->vgpu, &e);
		if (ret)
			goto fail;
	}
release:
	trace_spt_change(spt->vgpu->id, "release", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);
	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s = NULL;
	struct intel_vgpu_guest_page *g;
	struct intel_vgpu_page_track *t;
	int ret;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
		ret = -EINVAL;
		goto fail;
	}

	t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
	if (t) {
		g = page_track_to_guest_page(t);
		s = guest_page_to_ppgtt_spt(g);
		ppgtt_get_shadow_page(s);
	} else {
		int type = get_next_pt_type(we->type);

		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}

		ret = intel_gvt_hypervisor_enable_page_track(vgpu,
				&s->guest_page.track);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(s);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
			s->shadow_page.type);
	}
	return s;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			s, we->val64, we->type);
	return ERR_PTR(ret);
}

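/*
 * A shadow entry mirrors the guest entry, except that its pfn is replaced
 * with the mfn of the shadow page table page allocated for the next level.
 */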
static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
		for_each_present_guest_entry(spt, &ge, i) {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
				gtt_entry_p2m(vgpu, &ge, &se))
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
			ppgtt_set_shadow_entry(spt, &se, i);
		}
		return 0;
	}

	for_each_present_guest_entry(spt, &ge, i) {
		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit now\n");
			ret = -EINVAL;
			goto fail;
		}

		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &se, i);
		ppgtt_generate_shadow_entry(&se, s, &ge);
		ppgtt_set_shadow_entry(spt, &se, i);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}

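/*
 * Tear down the shadow state behind a guest entry that is going away:
 * entries pointing at the scratch page are ignored, and an entry that points
 * at a lower-level page table drops a reference on its shadow page.
 */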
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
			 index);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
		we->val64, index);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = gtt_entry_p2m(vgpu, we, &m);
		if (ret)
			goto fail;
		ppgtt_set_shadow_entry(spt, &m, index);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}

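/*
 * Re-sync an out-of-sync page: compare the cached copy of the guest page
 * table against its current contents and re-shadow only the entries that
 * changed.
 */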
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);
	struct intel_gvt_gtt_entry old, new, m;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	old.type = new.type = get_entry_type(spt->guest_page_type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				oos_page->guest_page, spt->guest_page_type,
				new.val64, index);

		ret = gtt_entry_p2m(vgpu, &new, &m);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
		ppgtt_set_shadow_entry(spt, &m, index);
	}

	oos_page->guest_page->write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	oos_page->guest_page->write_cnt = 0;
	oos_page->guest_page->oos_page = NULL;
	oos_page->guest_page = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(vgpu,
			gpt->track.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->guest_page = gpt;
	gpt->oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
	return 0;
}

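/*
 * Put an out-of-sync guest page back under write protection and bring its
 * shadow page up to date.
 */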
static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	int ret;

	ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
	if (ret)
		return ret;

	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_del_init(&gpt->oos_page->vm_list);
	return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
		ret = detach_oos_page(vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-synced shadow page tables for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se;

	int ret;
	int new_present;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one, so that the
	 * ppgtt table remains valid during the window between the add and
	 * the removal.
	 */
	ppgtt_get_shadow_entry(spt, &se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &se, index);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(
			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
		&& gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(
					&spt->guest_page, &ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

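/*
 * Entry point for trapped guest writes to a shadowed page table page:
 * writes of a full entry are shadowed immediately, partial writes are
 * recorded in the post-shadow bitmap and replayed on the next flush.
 */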
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_guest_page *gpt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	ops->test_pse(&we);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	gpt->write_cnt++;

	if (gpt->oos_page)
		ops->set_entry(gpt->oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(gpt)) {
		if (!gpt->oos_page)
			ppgtt_allocate_oos_page(vgpu, gpt);

		ret = ppgtt_set_guest_page_oos(vgpu, gpt);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/*
 * mm page table allocation policy for bdw+
 *  - for ggtt, only virtual page table will be allocated.
 *  - for ppgtt, dedicated virtual/shadow page table will be allocated.
 */
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mem;

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		mm->page_table_entry_cnt = 4;
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = kzalloc(mm->has_shadow_page_table ?
			mm->page_table_entry_size * 2
				: mm->page_table_entry_size, GFP_KERNEL);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
		if (!mm->has_shadow_page_table)
			return 0;
		mm->shadow_page_table = mem + mm->page_table_entry_size;
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		mm->page_table_entry_cnt =
			(gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = vzalloc(mm->page_table_entry_size);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
	}
	return 0;
}

static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
	if (mm->type == INTEL_GVT_MM_PPGTT) {
		kfree(mm->virtual_page_table);
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		if (mm->virtual_page_table)
			vfree(mm->virtual_page_table);
	}
	mm->virtual_page_table = mm->shadow_page_table = NULL;
}

static void invalidate_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int i;

	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
		return;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_shadow_root_entry(mm, &se, i);
		if (!ops->test_present(&se))
			continue;
		ppgtt_invalidate_shadow_page_by_shadow_entry(
				vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "destroy root pointer",
				NULL, se.type, se.val64, i);
	}
	mm->shadowed = false;
}

/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: a kref object embedded in the mm object
 *
 * This function is used to destroy a mm object for vGPU
 *
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;

	if (!mm->initialized)
		goto out;

	list_del(&mm->list);
	list_del(&mm->lru_list);

	if (mm->has_shadow_page_table)
		invalidate_mm(mm);

	gtt->mm_free_page_table(mm);
out:
	kfree(mm);
}

1552static int shadow_mm(struct intel_vgpu_mm *mm)
1553{
1554 struct intel_vgpu *vgpu = mm->vgpu;
1555 struct intel_gvt *gvt = vgpu->gvt;
1556 struct intel_gvt_gtt *gtt = &gvt->gtt;
1557 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1558 struct intel_vgpu_ppgtt_spt *spt;
1559 struct intel_gvt_gtt_entry ge, se;
1560 int i;
1561 int ret;
1562
1563 if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
1564 return 0;
1565
1566 mm->shadowed = true;
1567
1568 for (i = 0; i < mm->page_table_entry_cnt; i++) {
1569 ppgtt_get_guest_root_entry(mm, &ge, i);
1570 if (!ops->test_present(&ge))
1571 continue;
1572
1573 trace_gpt_change(vgpu->id, __func__, NULL,
1574 ge.type, ge.val64, i);
1575
1576 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
1577 if (IS_ERR(spt)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05001578 gvt_vgpu_err("fail to populate guest root pointer\n");
Zhi Wang2707e442016-03-28 23:23:16 +08001579 ret = PTR_ERR(spt);
1580 goto fail;
1581 }
1582 ppgtt_generate_shadow_entry(&se, spt, &ge);
1583 ppgtt_set_shadow_root_entry(mm, &se, i);
1584
1585 trace_gpt_change(vgpu->id, "populate root pointer",
1586 NULL, se.type, se.val64, i);
1587 }
1588 return 0;
1589fail:
1590 invalidate_mm(mm);
1591 return ret;
1592}
1593
1594/**
1595 * intel_vgpu_create_mm - create a mm object for a vGPU
1596 * @vgpu: a vGPU
1597 * @mm_type: mm object type, should be PPGTT or GGTT
1598 * @virtual_page_table: page table root pointers. Could be NULL if user wants
1599 * to populate shadow later.
1600 * @page_table_level: describe the page table level of the mm object
1601 * @pde_base_index: pde root pointer base in GGTT MMIO.
1602 *
1603 * This function is used to create a mm object for a vGPU.
1604 *
1605 * Returns:
 1606 * The new mm object on success, or a negative error code encoded in an ERR_PTR if failed.
1607 */
1608struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1609 int mm_type, void *virtual_page_table, int page_table_level,
1610 u32 pde_base_index)
1611{
1612 struct intel_gvt *gvt = vgpu->gvt;
1613 struct intel_gvt_gtt *gtt = &gvt->gtt;
1614 struct intel_vgpu_mm *mm;
1615 int ret;
1616
Jike Song96317392017-01-09 15:38:38 +08001617 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
Zhi Wang2707e442016-03-28 23:23:16 +08001618 if (!mm) {
1619 ret = -ENOMEM;
1620 goto fail;
1621 }
1622
1623 mm->type = mm_type;
1624
1625 if (page_table_level == 1)
1626 mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
1627 else if (page_table_level == 3)
1628 mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1629 else if (page_table_level == 4)
1630 mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1631 else {
1632 WARN_ON(1);
1633 ret = -EINVAL;
1634 goto fail;
1635 }
1636
1637 mm->page_table_level = page_table_level;
1638 mm->pde_base_index = pde_base_index;
1639
1640 mm->vgpu = vgpu;
1641 mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
1642
1643 kref_init(&mm->ref);
1644 atomic_set(&mm->pincount, 0);
1645 INIT_LIST_HEAD(&mm->list);
1646 INIT_LIST_HEAD(&mm->lru_list);
1647 list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
1648
1649 ret = gtt->mm_alloc_page_table(mm);
1650 if (ret) {
Tina Zhang695fbc02017-03-10 04:26:53 -05001651 gvt_vgpu_err("fail to allocate page table for mm\n");
Zhi Wang2707e442016-03-28 23:23:16 +08001652 goto fail;
1653 }
1654
1655 mm->initialized = true;
1656
1657 if (virtual_page_table)
1658 memcpy(mm->virtual_page_table, virtual_page_table,
1659 mm->page_table_entry_size);
1660
1661 if (mm->has_shadow_page_table) {
1662 ret = shadow_mm(mm);
1663 if (ret)
1664 goto fail;
1665 list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
1666 }
1667 return mm;
1668fail:
Tina Zhang695fbc02017-03-10 04:26:53 -05001669 gvt_vgpu_err("fail to create mm\n");
Zhi Wang2707e442016-03-28 23:23:16 +08001670 if (mm)
1671 intel_gvt_mm_unreference(mm);
1672 return ERR_PTR(ret);
1673}
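
/*
 * Hedged usage sketch (illustrative; "pdp" is a hypothetical array of
 * guest root pointers, e.g. read from the PV info page): create a
 * 4-level PPGTT mm, pin it before use and drop the reference when done.
 * This mirrors what the g2v notification path later in this file does.
 *
 *	struct intel_vgpu_mm *mm;
 *	u64 pdp[4];	/- guest PML4/PDP root pointers -/
 *	int ret;
 *
 *	mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, pdp, 4, 0);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	ret = intel_vgpu_pin_mm(mm);	/- shadows the mm if not yet shadowed -/
 *	if (ret)
 *		goto err;
 *	...
 *	intel_vgpu_unpin_mm(mm);
 *	intel_gvt_mm_unreference(mm);	/- drops the kref taken at creation -/
 */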
1674
1675/**
1676 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
1677 * @mm: a vGPU mm object
1678 *
1679 * This function is called when user doesn't want to use a vGPU mm object
1680 */
1681void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1682{
1683 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
1684 return;
1685
1686 atomic_dec(&mm->pincount);
1687}
1688
1689/**
1690 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 1691 * @mm: a vGPU mm object
1692 *
1693 * This function is called when user wants to use a vGPU mm object. If this
1694 * mm object hasn't been shadowed yet, the shadow will be populated at this
1695 * time.
1696 *
1697 * Returns:
1698 * Zero on success, negative error code if failed.
1699 */
1700int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1701{
1702 int ret;
1703
1704 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
1705 return 0;
1706
Zhi Wang2707e442016-03-28 23:23:16 +08001707 if (!mm->shadowed) {
1708 ret = shadow_mm(mm);
1709 if (ret)
1710 return ret;
1711 }
1712
fred gao46b441e2017-08-18 15:41:09 +08001713 atomic_inc(&mm->pincount);
Zhi Wang2707e442016-03-28 23:23:16 +08001714 list_del_init(&mm->lru_list);
1715 list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
1716 return 0;
1717}
1718
1719static int reclaim_one_mm(struct intel_gvt *gvt)
1720{
1721 struct intel_vgpu_mm *mm;
1722 struct list_head *pos, *n;
1723
1724 list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
1725 mm = container_of(pos, struct intel_vgpu_mm, lru_list);
1726
1727 if (mm->type != INTEL_GVT_MM_PPGTT)
1728 continue;
1729 if (atomic_read(&mm->pincount))
1730 continue;
1731
1732 list_del_init(&mm->lru_list);
1733 invalidate_mm(mm);
1734 return 1;
1735 }
1736 return 0;
1737}
1738
1739/*
1740 * GMA translation APIs.
1741 */
1742static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
1743 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
1744{
1745 struct intel_vgpu *vgpu = mm->vgpu;
1746 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1747 struct intel_vgpu_ppgtt_spt *s;
1748
1749 if (WARN_ON(!mm->has_shadow_page_table))
1750 return -EINVAL;
1751
1752 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
1753 if (!s)
1754 return -ENXIO;
1755
1756 if (!guest)
1757 ppgtt_get_shadow_entry(s, e, index);
1758 else
1759 ppgtt_get_guest_entry(s, e, index);
1760 return 0;
1761}
1762
1763/**
1764 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 1765 * @mm: mm object, could be a PPGTT or GGTT mm object
1766 * @gma: graphics memory address in this mm object
1767 *
1768 * This function is used to translate a graphics memory address in specific
1769 * graphics memory space to guest physical address.
1770 *
1771 * Returns:
1772 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
1773 */
1774unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
1775{
1776 struct intel_vgpu *vgpu = mm->vgpu;
1777 struct intel_gvt *gvt = vgpu->gvt;
1778 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
1779 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
1780 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
1781 unsigned long gma_index[4];
1782 struct intel_gvt_gtt_entry e;
1783 int i, index;
1784 int ret;
1785
1786 if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
1787 return INTEL_GVT_INVALID_ADDR;
1788
1789 if (mm->type == INTEL_GVT_MM_GGTT) {
1790 if (!vgpu_gmadr_is_valid(vgpu, gma))
1791 goto err;
1792
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001793 ret = ggtt_get_guest_entry(mm, &e,
1794 gma_ops->gma_to_ggtt_pte_index(gma));
1795 if (ret)
1796 goto err;
Zhi Wang9556e112017-10-10 13:51:32 +08001797 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
1798 + (gma & ~I915_GTT_PAGE_MASK);
Zhi Wang2707e442016-03-28 23:23:16 +08001799
1800 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
1801 return gpa;
1802 }
1803
1804 switch (mm->page_table_level) {
1805 case 4:
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001806 ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
1807 if (ret)
1808 goto err;
Zhi Wang2707e442016-03-28 23:23:16 +08001809 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
1810 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
1811 gma_index[2] = gma_ops->gma_to_pde_index(gma);
1812 gma_index[3] = gma_ops->gma_to_pte_index(gma);
1813 index = 4;
1814 break;
1815 case 3:
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001816 ret = ppgtt_get_shadow_root_entry(mm, &e,
Zhi Wang2707e442016-03-28 23:23:16 +08001817 gma_ops->gma_to_l3_pdp_index(gma));
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001818 if (ret)
1819 goto err;
Zhi Wang2707e442016-03-28 23:23:16 +08001820 gma_index[0] = gma_ops->gma_to_pde_index(gma);
1821 gma_index[1] = gma_ops->gma_to_pte_index(gma);
1822 index = 2;
1823 break;
1824 case 2:
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001825 ret = ppgtt_get_shadow_root_entry(mm, &e,
Zhi Wang2707e442016-03-28 23:23:16 +08001826 gma_ops->gma_to_pde_index(gma));
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001827 if (ret)
1828 goto err;
Zhi Wang2707e442016-03-28 23:23:16 +08001829 gma_index[0] = gma_ops->gma_to_pte_index(gma);
1830 index = 1;
1831 break;
1832 default:
1833 WARN_ON(1);
1834 goto err;
1835 }
1836
1837 /* walk into the shadow page table and get gpa from guest entry */
1838 for (i = 0; i < index; i++) {
1839 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
1840 (i == index - 1));
1841 if (ret)
1842 goto err;
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001843
1844 if (!pte_ops->test_present(&e)) {
1845 gvt_dbg_core("GMA 0x%lx is not present\n", gma);
1846 goto err;
1847 }
Zhi Wang2707e442016-03-28 23:23:16 +08001848 }
1849
Zhi Wang9556e112017-10-10 13:51:32 +08001850 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
1851 + (gma & ~I915_GTT_PAGE_MASK);
Zhi Wang2707e442016-03-28 23:23:16 +08001852
1853 trace_gma_translate(vgpu->id, "ppgtt", 0,
1854 mm->page_table_level, gma, gpa);
1855 return gpa;
1856err:
Tina Zhang695fbc02017-03-10 04:26:53 -05001857 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
Zhi Wang2707e442016-03-28 23:23:16 +08001858 return INTEL_GVT_INVALID_ADDR;
1859}
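
/*
 * Worked example (illustrative, assuming the standard gen8 4-level
 * index layout implemented by gen8_gtt_gma_ops): for
 * gma = 0x0000008040201000,
 *
 *	pml4 index = (gma >> 39) & 0x1ff = 1
 *	pdp  index = (gma >> 30) & 0x1ff = 1
 *	pde  index = (gma >> 21) & 0x1ff = 1
 *	pte  index = (gma >> 12) & 0x1ff = 1
 *
 * The walk above follows the shadow tables for the upper levels and
 * reads the guest entry only at the leaf, so the result is
 * gpa = (guest_pte_pfn << I915_GTT_PAGE_SHIFT) + (gma & ~I915_GTT_PAGE_MASK).
 */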
1860
1861static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
1862 unsigned int off, void *p_data, unsigned int bytes)
1863{
1864 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1865 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1866 unsigned long index = off >> info->gtt_entry_size_shift;
1867 struct intel_gvt_gtt_entry e;
1868
1869 if (bytes != 4 && bytes != 8)
1870 return -EINVAL;
1871
1872 ggtt_get_guest_entry(ggtt_mm, &e, index);
1873 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
1874 bytes);
1875 return 0;
1876}
1877
1878/**
1879 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
1880 * @vgpu: a vGPU
1881 * @off: register offset
1882 * @p_data: data will be returned to guest
1883 * @bytes: data length
1884 *
1885 * This function is used to emulate the GTT MMIO register read
1886 *
1887 * Returns:
1888 * Zero on success, error code if failed.
1889 */
1890int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
1891 void *p_data, unsigned int bytes)
1892{
1893 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1894 int ret;
1895
1896 if (bytes != 4 && bytes != 8)
1897 return -EINVAL;
1898
1899 off -= info->gtt_start_offset;
1900 ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
1901 return ret;
1902}
1903
1904static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1905 void *p_data, unsigned int bytes)
1906{
1907 struct intel_gvt *gvt = vgpu->gvt;
1908 const struct intel_gvt_device_info *info = &gvt->device_info;
1909 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1910 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1911 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
Hang Yuancc753fb2017-12-22 18:06:31 +08001912 unsigned long gma, gfn;
Zhi Wang2707e442016-03-28 23:23:16 +08001913 struct intel_gvt_gtt_entry e, m;
1914 int ret;
1915
1916 if (bytes != 4 && bytes != 8)
1917 return -EINVAL;
1918
Zhi Wang9556e112017-10-10 13:51:32 +08001919 gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
Zhi Wang2707e442016-03-28 23:23:16 +08001920
1921 /* the VM may configure the whole GM space when ballooning is used */
Zhao, Xinda7c281352017-02-21 15:54:56 +08001922 if (!vgpu_gmadr_is_valid(vgpu, gma))
Zhi Wang2707e442016-03-28 23:23:16 +08001923 return 0;
Zhi Wang2707e442016-03-28 23:23:16 +08001924
1925 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
1926
1927 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
1928 bytes);
1929
1930 if (ops->test_present(&e)) {
Hang Yuancc753fb2017-12-22 18:06:31 +08001931 gfn = ops->get_pfn(&e);
1932
1933 /* one PTE update may be issued in multiple writes and the
1934 * first write may not construct a valid gfn
1935 */
1936 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1937 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
1938 goto out;
1939 }
1940
Zhi Wang2707e442016-03-28 23:23:16 +08001941 ret = gtt_entry_p2m(vgpu, &e, &m);
1942 if (ret) {
Tina Zhang695fbc02017-03-10 04:26:53 -05001943 gvt_vgpu_err("fail to translate guest gtt entry\n");
Xiaoguang Chen359b6932017-03-21 10:54:21 +08001944 /* the guest driver may read/write the entry while it is only
 1945 * partially updated; p2m will fail in that situation, so set
 1946 * the shadow entry to point to a scratch page
1947 */
Zhi Wang22115ce2017-10-10 14:34:11 +08001948 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
Zhi Wang2707e442016-03-28 23:23:16 +08001949 }
1950 } else {
1951 m = e;
Zhi Wang22115ce2017-10-10 14:34:11 +08001952 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
Zhi Wang2707e442016-03-28 23:23:16 +08001953 }
1954
Hang Yuancc753fb2017-12-22 18:06:31 +08001955out:
Zhi Wang2707e442016-03-28 23:23:16 +08001956 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
Chuanxiao Dongaf2c6392017-06-02 15:34:24 +08001957 gtt_invalidate(gvt->dev_priv);
Zhi Wang2707e442016-03-28 23:23:16 +08001958 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
1959 return 0;
1960}
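
/*
 * Illustrative note on the partial-update handling above (an assumed
 * typical access pattern, not a guarantee): a 64-bit GGTT PTE is often
 * written by the guest as two 4-byte MMIO accesses, e.g.
 *
 *	off = 0x180000, bytes = 4, p_data = low dword of the new PTE
 *	off = 0x180004, bytes = 4, p_data = high dword of the new PTE
 *
 * Each access is memcpy'd into e.val64 at offset (off & 7), so after
 * only the first half the entry can carry a bogus pfn.  That is why a
 * failing intel_gvt_hypervisor_is_valid_gfn() or gtt_entry_p2m() makes
 * the shadow entry point at the scratch mfn instead of being fatal.
 */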
1961
 1962/**
1963 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
1964 * @vgpu: a vGPU
1965 * @off: register offset
1966 * @p_data: data from guest write
1967 * @bytes: data length
1968 *
1969 * This function is used to emulate the GTT MMIO register write
1970 *
1971 * Returns:
1972 * Zero on success, error code if failed.
1973 */
1974int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1975 void *p_data, unsigned int bytes)
1976{
1977 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1978 int ret;
1979
1980 if (bytes != 4 && bytes != 8)
1981 return -EINVAL;
1982
1983 off -= info->gtt_start_offset;
1984 ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
1985 return ret;
1986}
1987
Zhenyu Wang4fafba22017-12-18 11:58:46 +08001988int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
1989 void *p_data, unsigned int bytes)
1990{
1991 struct intel_gvt *gvt = vgpu->gvt;
1992 int ret = 0;
1993
1994 if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
1995 struct intel_vgpu_page_track *t;
1996
1997 mutex_lock(&gvt->lock);
1998
1999 t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
2000 if (t) {
2001 if (unlikely(vgpu->failsafe)) {
 2002 /* remove write protection to prevent future traps */
2003 intel_vgpu_clean_page_track(vgpu, t);
2004 } else {
2005 ret = t->handler(t, pa, p_data, bytes);
2006 if (ret) {
2007 gvt_err("guest page write error %d, "
2008 "gfn 0x%lx, pa 0x%llx, "
2009 "var 0x%x, len %d\n",
2010 ret, t->gfn, pa,
2011 *(u32 *)p_data, bytes);
2012 }
2013 }
2014 }
2015 mutex_unlock(&gvt->lock);
2016 }
2017 return ret;
2018}
2019
2020
Ping Gao3b6411c2016-11-04 13:47:35 +08002021static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2022 intel_gvt_gtt_type_t type)
Zhi Wang2707e442016-03-28 23:23:16 +08002023{
2024 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
Ping Gao3b6411c2016-11-04 13:47:35 +08002025 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
Zhenyu Wang5c352582017-11-02 17:44:52 +08002026 int page_entry_num = I915_GTT_PAGE_SIZE >>
Ping Gao3b6411c2016-11-04 13:47:35 +08002027 vgpu->gvt->device_info.gtt_entry_size_shift;
Jike Song96317392017-01-09 15:38:38 +08002028 void *scratch_pt;
Ping Gao3b6411c2016-11-04 13:47:35 +08002029 int i;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002030 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2031 dma_addr_t daddr;
Zhi Wang2707e442016-03-28 23:23:16 +08002032
Ping Gao3b6411c2016-11-04 13:47:35 +08002033 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2034 return -EINVAL;
2035
Jike Song96317392017-01-09 15:38:38 +08002036 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
Ping Gao3b6411c2016-11-04 13:47:35 +08002037 if (!scratch_pt) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002038 gvt_vgpu_err("fail to allocate scratch page\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002039 return -ENOMEM;
2040 }
2041
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002042 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
2043 4096, PCI_DMA_BIDIRECTIONAL);
2044 if (dma_mapping_error(dev, daddr)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002045 gvt_vgpu_err("fail to dmamap scratch_pt\n");
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002046 __free_page(virt_to_page(scratch_pt));
2047 return -ENOMEM;
Ping Gao3b6411c2016-11-04 13:47:35 +08002048 }
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002049 gtt->scratch_pt[type].page_mfn =
Zhenyu Wang5c352582017-11-02 17:44:52 +08002050 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
Jike Song96317392017-01-09 15:38:38 +08002051 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
Ping Gao3b6411c2016-11-04 13:47:35 +08002052 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002053 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
Ping Gao3b6411c2016-11-04 13:47:35 +08002054
 2055 /* Build the tree by fully filling the scratch pt with entries that
 2056 * point to the next level scratch pt or scratch page. The
 2057 * scratch_pt[type] indicates the scratch pt/scratch page used by the
 2058 * 'type' pt.
 2059 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
Jike Song96317392017-01-09 15:38:38 +08002060 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
Ping Gao3b6411c2016-11-04 13:47:35 +08002061 * is a GTT_TYPE_PPGTT_PTE_PT and is fully filled with the scratch page mfn.
2062 */
2063 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
2064 struct intel_gvt_gtt_entry se;
2065
2066 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2067 se.type = get_entry_type(type - 1);
2068 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2069
 2070 /* The entry parameters like present/writable/cache type are
 2071 * set the same as in i915's scratch page tree.
2072 */
2073 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
2074 if (type == GTT_TYPE_PPGTT_PDE_PT)
Zhi Wangc095b972017-09-14 20:39:41 +08002075 se.val64 |= PPAT_CACHED;
Ping Gao3b6411c2016-11-04 13:47:35 +08002076
2077 for (i = 0; i < page_entry_num; i++)
Jike Song96317392017-01-09 15:38:38 +08002078 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002079 }
2080
Zhi Wang2707e442016-03-28 23:23:16 +08002081 return 0;
2082}
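
/*
 * Illustrative view of the scratch chain built above (assuming BDW+
 * with 4-level PPGTT, 512 eight-byte entries per 4K page table, and the
 * usual PTE_PT < PDE_PT < PDP_PT < PML4_PT type ordering):
 *
 *	scratch_pt[PTE_PT]  : zeroed 4K page, the target of a cleared PTE
 *	scratch_pt[PDE_PT]  : 512 PTEs  -> scratch_pt[PTE_PT].page_mfn
 *	scratch_pt[PDP_PT]  : 512 PDEs  -> scratch_pt[PDE_PT].page_mfn
 *	scratch_pt[PML4_PT] : 512 PDPs  -> scratch_pt[PDP_PT].page_mfn
 *
 * so a guest entry removed from a table of type T can simply be pointed
 * at scratch_pt[T] without walking further down the hierarchy.
 */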
2083
Ping Gao3b6411c2016-11-04 13:47:35 +08002084static int release_scratch_page_tree(struct intel_vgpu *vgpu)
Zhi Wang2707e442016-03-28 23:23:16 +08002085{
Ping Gao3b6411c2016-11-04 13:47:35 +08002086 int i;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002087 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2088 dma_addr_t daddr;
Ping Gao3b6411c2016-11-04 13:47:35 +08002089
2090 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2091 if (vgpu->gtt.scratch_pt[i].page != NULL) {
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002092 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
Zhenyu Wang5c352582017-11-02 17:44:52 +08002093 I915_GTT_PAGE_SHIFT);
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002094 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
Ping Gao3b6411c2016-11-04 13:47:35 +08002095 __free_page(vgpu->gtt.scratch_pt[i].page);
2096 vgpu->gtt.scratch_pt[i].page = NULL;
2097 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2098 }
Zhi Wang2707e442016-03-28 23:23:16 +08002099 }
Ping Gao3b6411c2016-11-04 13:47:35 +08002100
2101 return 0;
2102}
2103
2104static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2105{
2106 int i, ret;
2107
2108 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2109 ret = alloc_scratch_pages(vgpu, i);
2110 if (ret)
2111 goto err;
2112 }
2113
2114 return 0;
2115
2116err:
2117 release_scratch_page_tree(vgpu);
2118 return ret;
Zhi Wang2707e442016-03-28 23:23:16 +08002119}
2120
2121/**
 2122 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2123 * @vgpu: a vGPU
2124 *
2125 * This function is used to initialize per-vGPU graphics memory virtualization
2126 * components.
2127 *
2128 * Returns:
2129 * Zero on success, error code if failed.
2130 */
2131int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2132{
2133 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2134 struct intel_vgpu_mm *ggtt_mm;
2135
Zhi Wang7d1e5cd2017-09-29 02:47:55 +08002136 hash_init(gtt->tracked_guest_page_hash_table);
Zhi Wang2707e442016-03-28 23:23:16 +08002137 hash_init(gtt->shadow_page_hash_table);
2138
2139 INIT_LIST_HEAD(&gtt->mm_list_head);
2140 INIT_LIST_HEAD(&gtt->oos_page_list_head);
2141 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2142
Ping Gaod650ac02016-12-08 10:14:48 +08002143 intel_vgpu_reset_ggtt(vgpu);
2144
Zhi Wang2707e442016-03-28 23:23:16 +08002145 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
2146 NULL, 1, 0);
2147 if (IS_ERR(ggtt_mm)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002148 gvt_vgpu_err("fail to create mm for ggtt.\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002149 return PTR_ERR(ggtt_mm);
2150 }
2151
2152 gtt->ggtt_mm = ggtt_mm;
2153
Ping Gao3b6411c2016-11-04 13:47:35 +08002154 return create_scratch_page_tree(vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002155}
2156
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002157static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
2158{
2159 struct list_head *pos, *n;
2160 struct intel_vgpu_mm *mm;
2161
2162 list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
2163 mm = container_of(pos, struct intel_vgpu_mm, list);
2164 if (mm->type == type) {
2165 vgpu->gvt->gtt.mm_free_page_table(mm);
2166 list_del(&mm->list);
2167 list_del(&mm->lru_list);
2168 kfree(mm);
2169 }
2170 }
2171}
2172
Zhi Wang2707e442016-03-28 23:23:16 +08002173/**
 2174 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2175 * @vgpu: a vGPU
2176 *
2177 * This function is used to clean up per-vGPU graphics memory virtualization
2178 * components.
2179 *
2180 * Returns:
2181 * Zero on success, error code if failed.
2182 */
2183void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2184{
Zhi Wang2707e442016-03-28 23:23:16 +08002185 ppgtt_free_all_shadow_page(vgpu);
Ping Gao3b6411c2016-11-04 13:47:35 +08002186 release_scratch_page_tree(vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002187
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002188 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
2189 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
Zhi Wang2707e442016-03-28 23:23:16 +08002190}
2191
2192static void clean_spt_oos(struct intel_gvt *gvt)
2193{
2194 struct intel_gvt_gtt *gtt = &gvt->gtt;
2195 struct list_head *pos, *n;
2196 struct intel_vgpu_oos_page *oos_page;
2197
2198 WARN(!list_empty(&gtt->oos_page_use_list_head),
2199 "someone is still using oos page\n");
2200
2201 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2202 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2203 list_del(&oos_page->list);
2204 kfree(oos_page);
2205 }
2206}
2207
2208static int setup_spt_oos(struct intel_gvt *gvt)
2209{
2210 struct intel_gvt_gtt *gtt = &gvt->gtt;
2211 struct intel_vgpu_oos_page *oos_page;
2212 int i;
2213 int ret;
2214
2215 INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2216 INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2217
2218 for (i = 0; i < preallocated_oos_pages; i++) {
2219 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2220 if (!oos_page) {
Zhi Wang2707e442016-03-28 23:23:16 +08002221 ret = -ENOMEM;
2222 goto fail;
2223 }
2224
2225 INIT_LIST_HEAD(&oos_page->list);
2226 INIT_LIST_HEAD(&oos_page->vm_list);
2227 oos_page->id = i;
2228 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2229 }
2230
2231 gvt_dbg_mm("%d oos pages preallocated\n", i);
2232
2233 return 0;
2234fail:
2235 clean_spt_oos(gvt);
2236 return ret;
2237}
2238
2239/**
2240 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2241 * @vgpu: a vGPU
2242 * @page_table_level: PPGTT page table level
2243 * @root_entry: PPGTT page table root pointers
2244 *
2245 * This function is used to find a PPGTT mm object from mm object pool
2246 *
2247 * Returns:
2248 * pointer to mm object on success, NULL if failed.
2249 */
2250struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2251 int page_table_level, void *root_entry)
2252{
2253 struct list_head *pos;
2254 struct intel_vgpu_mm *mm;
2255 u64 *src, *dst;
2256
2257 list_for_each(pos, &vgpu->gtt.mm_list_head) {
2258 mm = container_of(pos, struct intel_vgpu_mm, list);
2259 if (mm->type != INTEL_GVT_MM_PPGTT)
2260 continue;
2261
2262 if (mm->page_table_level != page_table_level)
2263 continue;
2264
2265 src = root_entry;
2266 dst = mm->virtual_page_table;
2267
2268 if (page_table_level == 3) {
2269 if (src[0] == dst[0]
2270 && src[1] == dst[1]
2271 && src[2] == dst[2]
2272 && src[3] == dst[3])
2273 return mm;
2274 } else {
2275 if (src[0] == dst[0])
2276 return mm;
2277 }
2278 }
2279 return NULL;
2280}
2281
2282/**
2283 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
2284 * g2v notification
2285 * @vgpu: a vGPU
2286 * @page_table_level: PPGTT page table level
2287 *
2288 * This function is used to create a PPGTT mm object from a guest to GVT-g
2289 * notification.
2290 *
2291 * Returns:
2292 * Zero on success, negative error code if failed.
2293 */
2294int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
2295 int page_table_level)
2296{
Zhenyu Wang90551a12017-12-19 13:02:51 +08002297 u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
Zhi Wang2707e442016-03-28 23:23:16 +08002298 struct intel_vgpu_mm *mm;
2299
2300 if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
2301 return -EINVAL;
2302
2303 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2304 if (mm) {
2305 intel_gvt_mm_reference(mm);
2306 } else {
2307 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
2308 pdp, page_table_level, 0);
2309 if (IS_ERR(mm)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002310 gvt_vgpu_err("fail to create mm\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002311 return PTR_ERR(mm);
2312 }
2313 }
2314 return 0;
2315}
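
/*
 * Hedged sketch of how this path is reached (an assumption based on the
 * PV info protocol used by GVT-g): the guest i915 driver stores its
 * root pointers into vgtif_reg(pdp[0..3]) and then writes a
 * VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE (or the L3 variant) token into
 * vgtif_reg(g2v_notify); the MMIO handler for that register then calls
 * intel_vgpu_g2v_create_ppgtt_mm() with the matching page table level.
 */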
2316
2317/**
2318 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
2319 * g2v notification
2320 * @vgpu: a vGPU
2321 * @page_table_level: PPGTT page table level
2322 *
 2323 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
2324 * notification.
2325 *
2326 * Returns:
2327 * Zero on success, negative error code if failed.
2328 */
2329int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2330 int page_table_level)
2331{
Zhenyu Wang90551a12017-12-19 13:02:51 +08002332 u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
Zhi Wang2707e442016-03-28 23:23:16 +08002333 struct intel_vgpu_mm *mm;
2334
2335 if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
2336 return -EINVAL;
2337
2338 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2339 if (!mm) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002340 gvt_vgpu_err("fail to find ppgtt instance.\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002341 return -EINVAL;
2342 }
2343 intel_gvt_mm_unreference(mm);
2344 return 0;
2345}
2346
2347/**
2348 * intel_gvt_init_gtt - initialize mm components of a GVT device
2349 * @gvt: GVT device
2350 *
2351 * This function is called at the initialization stage, to initialize
2352 * the mm components of a GVT device.
2353 *
2354 * Returns:
2355 * zero on success, negative error code if failed.
2356 */
2357int intel_gvt_init_gtt(struct intel_gvt *gvt)
2358{
2359 int ret;
Jike Song96317392017-01-09 15:38:38 +08002360 void *page;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002361 struct device *dev = &gvt->dev_priv->drm.pdev->dev;
2362 dma_addr_t daddr;
Zhi Wang2707e442016-03-28 23:23:16 +08002363
2364 gvt_dbg_core("init gtt\n");
2365
Xu Hane3476c02017-03-29 10:13:59 +08002366 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
2367 || IS_KABYLAKE(gvt->dev_priv)) {
Zhi Wang2707e442016-03-28 23:23:16 +08002368 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2369 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2370 gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
2371 gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
2372 } else {
2373 return -ENODEV;
2374 }
2375
Jike Song96317392017-01-09 15:38:38 +08002376 page = (void *)get_zeroed_page(GFP_KERNEL);
2377 if (!page) {
Ping Gaod650ac02016-12-08 10:14:48 +08002378 gvt_err("fail to allocate scratch ggtt page\n");
2379 return -ENOMEM;
2380 }
2381
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002382 daddr = dma_map_page(dev, virt_to_page(page), 0,
2383 4096, PCI_DMA_BIDIRECTIONAL);
2384 if (dma_mapping_error(dev, daddr)) {
2385 gvt_err("fail to dmamap scratch ggtt page\n");
2386 __free_page(virt_to_page(page));
2387 return -ENOMEM;
Ping Gaod650ac02016-12-08 10:14:48 +08002388 }
Zhi Wang22115ce2017-10-10 14:34:11 +08002389
2390 gvt->gtt.scratch_page = virt_to_page(page);
2391 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
Ping Gaod650ac02016-12-08 10:14:48 +08002392
Zhi Wang2707e442016-03-28 23:23:16 +08002393 if (enable_out_of_sync) {
2394 ret = setup_spt_oos(gvt);
2395 if (ret) {
2396 gvt_err("fail to initialize SPT oos\n");
Zhou, Wenjia0de98702017-07-04 15:47:00 +08002397 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
Zhi Wang22115ce2017-10-10 14:34:11 +08002398 __free_page(gvt->gtt.scratch_page);
Zhi Wang2707e442016-03-28 23:23:16 +08002399 return ret;
2400 }
2401 }
2402 INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
2403 return 0;
2404}
2405
2406/**
2407 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2408 * @gvt: GVT device
2409 *
 2410 * This function is called at the driver unloading stage, to clean up
 2411 * the mm components of a GVT device.
2412 *
2413 */
2414void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2415{
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002416 struct device *dev = &gvt->dev_priv->drm.pdev->dev;
Zhi Wang22115ce2017-10-10 14:34:11 +08002417 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
Zhi Wang9556e112017-10-10 13:51:32 +08002418 I915_GTT_PAGE_SHIFT);
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002419
2420 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2421
Zhi Wang22115ce2017-10-10 14:34:11 +08002422 __free_page(gvt->gtt.scratch_page);
Ping Gaod650ac02016-12-08 10:14:48 +08002423
Zhi Wang2707e442016-03-28 23:23:16 +08002424 if (enable_out_of_sync)
2425 clean_spt_oos(gvt);
2426}
Ping Gaod650ac02016-12-08 10:14:48 +08002427
2428/**
2429 * intel_vgpu_reset_ggtt - reset the GGTT entry
2430 * @vgpu: a vGPU
2431 *
2432 * This function is called at the vGPU create stage
2433 * to reset all the GGTT entries.
2434 *
2435 */
2436void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2437{
2438 struct intel_gvt *gvt = vgpu->gvt;
Zhenyu Wang5ad59bf2017-04-12 16:24:57 +08002439 struct drm_i915_private *dev_priv = gvt->dev_priv;
Ping Gaod650ac02016-12-08 10:14:48 +08002440 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2441 u32 index;
2442 u32 offset;
2443 u32 num_entries;
2444 struct intel_gvt_gtt_entry e;
2445
2446 memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
2447 e.type = GTT_TYPE_GGTT_PTE;
Zhi Wang22115ce2017-10-10 14:34:11 +08002448 ops->set_pfn(&e, gvt->gtt.scratch_mfn);
Ping Gaod650ac02016-12-08 10:14:48 +08002449 e.val64 |= _PAGE_PRESENT;
2450
2451 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2452 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2453 for (offset = 0; offset < num_entries; offset++)
2454 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2455
2456 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2457 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2458 for (offset = 0; offset < num_entries; offset++)
2459 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
Zhenyu Wang5ad59bf2017-04-12 16:24:57 +08002460
Chuanxiao Dongaf2c6392017-06-02 15:34:24 +08002461 gtt_invalidate(dev_priv);
Ping Gaod650ac02016-12-08 10:14:48 +08002462}
Changbin Dub6115812017-01-13 11:15:57 +08002463
2464/**
2465 * intel_vgpu_reset_gtt - reset the all GTT related status
2466 * @vgpu: a vGPU
Changbin Dub6115812017-01-13 11:15:57 +08002467 *
 2468 * This function is called from the vfio core to reset all
2469 * GTT related status, including GGTT, PPGTT, scratch page.
2470 *
2471 */
Chuanxiao Dong4d3e67b2017-08-04 13:08:59 +08002472void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
Changbin Dub6115812017-01-13 11:15:57 +08002473{
Changbin Dub6115812017-01-13 11:15:57 +08002474 ppgtt_free_all_shadow_page(vgpu);
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002475
2476 /* Shadow pages are only created when there is no page
2477 * table tracking data, so remove page tracking data after
2478 * removing the shadow pages.
2479 */
2480 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
2481
Changbin Dub6115812017-01-13 11:15:57 +08002482 intel_vgpu_reset_ggtt(vgpu);
Changbin Dub6115812017-01-13 11:15:57 +08002483}