/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

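/*
 * Knobs for the out-of-sync (OOS) shadow page optimization implemented
 * below: enable_out_of_sync gates it, and preallocated_oos_pages sets the
 * size of the preallocated OOS page pool.
 */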
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * Validate a gm address and its range size against the vGPU's
 * graphics memory space.
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
        if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
                        && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
                gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
                                addr, size);
                return false;
        }
        return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
        if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
                        "invalid guest gmadr %llx\n", g_addr))
                return -EACCES;

        if (vgpu_gmadr_is_aperture(vgpu, g_addr))
                *h_addr = vgpu_aperture_gmadr_base(vgpu)
                          + (g_addr - vgpu_aperture_offset(vgpu));
        else
                *h_addr = vgpu_hidden_gmadr_base(vgpu)
                          + (g_addr - vgpu_hidden_offset(vgpu));
        return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
        if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
                        "invalid host gmadr %llx\n", h_addr))
                return -EACCES;

        if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
                *g_addr = vgpu_aperture_gmadr_base(vgpu)
                          + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
        else
                *g_addr = vgpu_hidden_gmadr_base(vgpu)
                          + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
        return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
                             unsigned long *h_index)
{
        u64 h_addr;
        int ret;

        ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
                                       &h_addr);
        if (ret)
                return ret;

        *h_index = h_addr >> GTT_PAGE_SHIFT;
        return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                             unsigned long *g_index)
{
        u64 g_addr;
        int ret;

        ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
                                       &g_addr);
        if (ret)
                return ret;

        *g_index = g_addr >> GTT_PAGE_SHIFT;
        return 0;
}

#define gtt_type_is_entry(type) \
        (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
         && type != GTT_TYPE_PPGTT_PTE_ENTRY \
         && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
        (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
        (type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
        (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
        (e)->type = t; \
        (e)->pdev = p; \
        memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next level page table
 * - the type of an entry inside this level of page table
 * - the type of the entry when its PSE bit is set
 *
 * If the given type doesn't carry a piece of information -- e.g. an L4
 * root entry has no PSE bit, and a PTE page table has no next level
 * page table -- GTT_TYPE_INVALID is returned for that request. This is
 * useful when traversing a page table.
 */

struct gtt_type_table_entry {
        int entry_type;
        int next_pt_type;
        int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
        [type] = { \
                .entry_type = e_type, \
                .next_pt_type = npt_type, \
                .pse_entry_type = pse_type, \
        }

static struct gtt_type_table_entry gtt_type_table[] = {
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
                        GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
                        GTT_TYPE_PPGTT_PML4_PT,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
                        GTT_TYPE_PPGTT_PML4_ENTRY,
                        GTT_TYPE_PPGTT_PDP_PT,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
                        GTT_TYPE_PPGTT_PML4_ENTRY,
                        GTT_TYPE_PPGTT_PDP_PT,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
                        GTT_TYPE_PPGTT_PDP_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_1G_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
                        GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_1G_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
                        GTT_TYPE_PPGTT_PDP_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_1G_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
                        GTT_TYPE_PPGTT_PDP_ENTRY,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_PPGTT_PTE_1G_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
                        GTT_TYPE_GGTT_PTE,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
        return gtt_type_table[type].next_pt_type;
}

static inline int get_entry_type(int type)
{
        return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
        return gtt_type_table[type].pse_entry_type;
}
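
/*
 * For illustration, a top-down walk of a 4-level PPGTT using the table
 * above: get_next_pt_type() maps an entry type to the page table it
 * points to, and get_entry_type() gives the entry type inside that
 * table (GTT_TYPE_PPGTT_ prefixes omitted below):
 *
 *   ROOT_L4_ENTRY -> PML4_PT (entries: PML4_ENTRY)
 *   PML4_ENTRY    -> PDP_PT  (entries: PDP_ENTRY)
 *   PDP_ENTRY     -> PDE_PT  (entries: PDE_ENTRY)
 *   PDE_ENTRY     -> PTE_PT  (entries: PTE_4K_ENTRY)
 *   PTE_4K_ENTRY  -> GTT_TYPE_INVALID, so the walk terminates
 */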

static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

        return readq(addr);
}

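/*
 * Presumably this posts a GGTT flush so PTE updates written through the
 * GSM become visible to the GPU; the write is wrapped in
 * mmio_hw_access_pre/post since it is a raw host MMIO access.
 */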
static void gtt_invalidate(struct drm_i915_private *dev_priv)
{
        mmio_hw_access_pre(dev_priv);
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
                unsigned long index, u64 pte)
{
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

        writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
                struct intel_gvt_gtt_entry *e,
                unsigned long index, bool hypervisor_access, unsigned long gpa,
                struct intel_vgpu *vgpu)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        int ret;

        if (WARN_ON(info->gtt_entry_size != 8))
                return -EINVAL;

        if (hypervisor_access) {
                ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
                                (index << info->gtt_entry_size_shift),
                                &e->val64, 8);
                if (WARN_ON(ret))
                        return ret;
        } else if (!pt) {
                e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
        } else {
                e->val64 = *((u64 *)pt + index);
        }
        return 0;
}

static inline int gtt_set_entry64(void *pt,
                struct intel_gvt_gtt_entry *e,
                unsigned long index, bool hypervisor_access, unsigned long gpa,
                struct intel_vgpu *vgpu)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        int ret;

        if (WARN_ON(info->gtt_entry_size != 8))
                return -EINVAL;

        if (hypervisor_access) {
                ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
                                (index << info->gtt_entry_size_shift),
                                &e->val64, 8);
                if (WARN_ON(ret))
                        return ret;
        } else if (!pt) {
                write_pte64(vgpu->gvt->dev_priv, index, e->val64);
        } else {
                *((u64 *)pt + index) = e->val64;
        }
        return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
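
/*
 * The ADDR_1G/2M/4K_MASK values select the address payload of an entry
 * for the respective page size under a GTT_HAW-bit host address width.
 * Note that gen8_gtt_get_pfn()/gen8_gtt_set_pfn() below always shift by
 * 12, so pfns are expressed in 4K units regardless of the entry's page
 * size.
 */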

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
        unsigned long pfn;

        if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
                pfn = (e->val64 & ADDR_1G_MASK) >> 12;
        else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
                pfn = (e->val64 & ADDR_2M_MASK) >> 12;
        else
                pfn = (e->val64 & ADDR_4K_MASK) >> 12;
        return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
        if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
                e->val64 &= ~ADDR_1G_MASK;
                pfn &= (ADDR_1G_MASK >> 12);
        } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
                e->val64 &= ~ADDR_2M_MASK;
                pfn &= (ADDR_2M_MASK >> 12);
        } else {
                e->val64 &= ~ADDR_4K_MASK;
                pfn &= (ADDR_4K_MASK >> 12);
        }

        e->val64 |= (pfn << 12);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
        /* Entry doesn't have PSE bit. */
        if (get_pse_type(e->type) == GTT_TYPE_INVALID)
                return false;

        e->type = get_entry_type(e->type);
        if (!(e->val64 & BIT(7)))
                return false;

        e->type = get_pse_type(e->type);
        return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
        /*
         * i915 writes PDP root pointer registers without the present bit
         * set, and that still works, so root pointer entries need to be
         * treated specially here.
         */
        if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
                        || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
                return (e->val64 != 0);
        else
                return (e->val64 & BIT(0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
        e->val64 &= ~BIT(0);
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
        unsigned long x = (gma >> GTT_PAGE_SHIFT);

        trace_gma_index(__func__, gma, x);
        return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
        unsigned long x = (exp); \
        trace_gma_index(__func__, gma, x); \
        return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
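
/*
 * For reference, the gen8 GMA bit layout implied by the shifts and masks
 * above:
 *
 *   gma[47:39] pml4 index
 *   gma[38:30] pdp index (l4 tables; l3 tables only decode gma[31:30])
 *   gma[29:21] pde index
 *   gma[20:12] pte index
 *   gma[11:0]  offset within the 4K page
 */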

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
        .get_entry = gtt_get_entry64,
        .set_entry = gtt_set_entry64,
        .clear_present = gtt_entry_clear_present,
        .test_present = gen8_gtt_test_present,
        .test_pse = gen8_gtt_test_pse,
        .get_pfn = gen8_gtt_get_pfn,
        .set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
        .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
        .gma_to_pte_index = gen8_gma_to_pte_index,
        .gma_to_pde_index = gen8_gma_to_pde_index,
        .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
        .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
        .gma_to_pml4_index = gen8_gma_to_pml4_index,
};

static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
                struct intel_gvt_gtt_entry *m)
{
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        unsigned long gfn, mfn;

        *m = *p;

        if (!ops->test_present(p))
                return 0;

        gfn = ops->get_pfn(p);

        mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
                gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
                return -ENXIO;
        }

        ops->set_pfn(m, mfn);
        return 0;
}

/*
 * MM helpers.
 */
int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
                void *page_table, struct intel_gvt_gtt_entry *e,
                unsigned long index)
{
        struct intel_gvt *gvt = mm->vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
        int ret;

        e->type = mm->page_table_entry_type;

        ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
        if (ret)
                return ret;

        ops->test_pse(e);
        return 0;
}

int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
                void *page_table, struct intel_gvt_gtt_entry *e,
                unsigned long index)
{
        struct intel_gvt *gvt = mm->vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

        return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
                struct intel_vgpu_ppgtt_spt *spt,
                void *page_table, int type,
                struct intel_gvt_gtt_entry *e, unsigned long index,
                bool guest)
{
        struct intel_gvt *gvt = spt->vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
        int ret;

        e->type = get_entry_type(type);

        if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
                return -EINVAL;

        ret = ops->get_entry(page_table, e, index, guest,
                        spt->guest_page.track.gfn << GTT_PAGE_SHIFT,
                        spt->vgpu);
        if (ret)
                return ret;

        ops->test_pse(e);
        return 0;
}

static inline int ppgtt_spt_set_entry(
                struct intel_vgpu_ppgtt_spt *spt,
                void *page_table, int type,
                struct intel_gvt_gtt_entry *e, unsigned long index,
                bool guest)
{
        struct intel_gvt *gvt = spt->vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

        if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
                return -EINVAL;

        return ops->set_entry(page_table, e, index, guest,
                        spt->guest_page.track.gfn << GTT_PAGE_SHIFT,
                        spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
        ppgtt_spt_get_entry(spt, NULL, \
                spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
        ppgtt_spt_set_entry(spt, NULL, \
                spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
        ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
                spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
        ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
                spt->shadow_page.type, e, index, false)
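
/*
 * Note the asymmetry in the accessors above: guest entries are reached
 * through the hypervisor GPA interface (page_table == NULL, guest ==
 * true), while shadow entries are read/written directly through the
 * shadow page's kernel mapping (shadow_page.vaddr).
 */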

/**
 * intel_vgpu_init_page_track - init a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 * @gfn: guest memory page frame number
 * @handler: the function to be called when the target guest memory page
 * has been modified.
 * @data: private data associated with the tracked page
 *
 * This function is called when a user wants to prepare a page track data
 * structure to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
                struct intel_vgpu_page_track *t,
                unsigned long gfn,
                int (*handler)(void *, u64, void *, int),
                void *data)
{
        INIT_HLIST_NODE(&t->node);

        t->tracked = false;
        t->gfn = gfn;
        t->handler = handler;
        t->data = data;

        hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
        return 0;
}

/**
 * intel_vgpu_clean_page_track - release a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 *
 * This function is called before a user frees a page track data structure.
 */
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
                struct intel_vgpu_page_track *t)
{
        if (!hlist_unhashed(&t->node))
                hash_del(&t->node);

        if (t->tracked)
                intel_gvt_hypervisor_disable_page_track(vgpu, t);
}

/**
 * intel_vgpu_find_tracked_page - find a tracked guest page
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation layer wants to figure out if a
 * trapped GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to page track data structure, NULL if not found.
 */
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
                struct intel_vgpu *vgpu, unsigned long gfn)
{
        struct intel_vgpu_page_track *t;

        hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
                        t, node, gfn) {
                if (t->gfn == gfn)
                        return t;
        }
        return NULL;
}

static int init_guest_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *p,
                unsigned long gfn,
                int (*handler)(void *, u64, void *, int),
                void *data)
{
        p->oos_page = NULL;
        p->write_cnt = 0;

        return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page);

static void clean_guest_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *p)
{
        if (p->oos_page)
                detach_oos_page(vgpu, p->oos_page);

        intel_vgpu_clean_page_track(vgpu, &p->track);
}

static inline int init_shadow_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_shadow_page *p, int type)
{
        struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        dma_addr_t daddr;

        daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(kdev, daddr)) {
                gvt_vgpu_err("fail to map dma addr\n");
                return -EINVAL;
        }

        p->vaddr = page_address(p->page);
        p->type = type;

        INIT_HLIST_NODE(&p->node);

        p->mfn = daddr >> GTT_PAGE_SHIFT;
        hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
        return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_shadow_page *p)
{
        struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

        dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
                        PCI_DMA_BIDIRECTIONAL);

        if (!hlist_unhashed(&p->node))
                hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
                struct intel_vgpu *vgpu, unsigned long mfn)
{
        struct intel_vgpu_shadow_page *p;

        hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
                        p, node, mfn) {
                if (p->mfn == mfn)
                        return p;
        }
        return NULL;
}

#define page_track_to_guest_page(ptr) \
        container_of(ptr, struct intel_vgpu_guest_page, track)

#define guest_page_to_ppgtt_spt(ptr) \
        container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
        container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
        struct intel_vgpu_ppgtt_spt *spt;

        spt = kzalloc(sizeof(*spt), gfp_mask);
        if (!spt)
                return NULL;

        spt->shadow_page.page = alloc_page(gfp_mask);
        if (!spt->shadow_page.page) {
                kfree(spt);
                return NULL;
        }
        return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
        __free_page(spt->shadow_page.page);
        kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
        trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

        clean_shadow_page(spt->vgpu, &spt->shadow_page);
        clean_guest_page(spt->vgpu, &spt->guest_page);
        list_del_init(&spt->post_shadow_list);

        free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
        struct hlist_node *n;
        struct intel_vgpu_shadow_page *sp;
        int i;

        hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
                ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(
                struct intel_vgpu_guest_page *gpt,
                u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *data, u64 pa,
                void *p_data, int bytes)
{
        struct intel_vgpu_page_track *t = data;
        struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        if (!t->tracked)
                return -EINVAL;

        return ppgtt_handle_guest_write_page_table_bytes(p,
                        pa, p_data, bytes);
}

static int reclaim_one_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
                struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
        struct intel_vgpu_ppgtt_spt *spt = NULL;
        int ret;

retry:
        spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
        if (!spt) {
                if (reclaim_one_mm(vgpu->gvt))
                        goto retry;

                gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
                return ERR_PTR(-ENOMEM);
        }

        spt->vgpu = vgpu;
        spt->guest_page_type = type;
        atomic_set(&spt->refcount, 1);
        INIT_LIST_HEAD(&spt->post_shadow_list);

        /*
         * TODO: the guest page type may be different from the shadow page
         * type, once PSE pages are supported in the future.
         */
        ret = init_shadow_page(vgpu, &spt->shadow_page, type);
        if (ret) {
                gvt_vgpu_err("fail to initialize shadow page for spt\n");
                goto err;
        }

        ret = init_guest_page(vgpu, &spt->guest_page,
                        gfn, ppgtt_write_protection_handler, NULL);
        if (ret) {
                gvt_vgpu_err("fail to initialize guest page for spt\n");
                goto err;
        }

        trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
        return spt;
err:
        ppgtt_free_shadow_page(spt);
        return ERR_PTR(ret);
}

static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
                struct intel_vgpu *vgpu, unsigned long mfn)
{
        struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

        if (p)
                return shadow_page_to_ppgtt_spt(p);

        gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
        return NULL;
}

#define pt_entry_size_shift(spt) \
        ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
        (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
        for (i = 0; i < pt_entries(spt); i++) \
                if (!ppgtt_get_guest_entry(spt, e, i) && \
                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
        for (i = 0; i < pt_entries(spt); i++) \
                if (!ppgtt_get_shadow_entry(spt, e, i) && \
                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
        int v = atomic_read(&spt->refcount);

        trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

        atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
                struct intel_gvt_gtt_entry *e)
{
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *s;
        intel_gvt_gtt_type_t cur_pt_type;

        if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
                return -EINVAL;

        if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
                && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                /*
                 * An entry that points at a per-vGPU scratch page table
                 * needs no invalidation.
                 */
                cur_pt_type = get_next_pt_type(e->type) + 1;
                if (ops->get_pfn(e) ==
                        vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
                        return 0;
        }
        s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
        if (!s) {
                gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
                                ops->get_pfn(e));
                return -ENXIO;
        }
        return ppgtt_invalidate_shadow_page(s);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_entry e;
        unsigned long index;
        int ret;
        int v = atomic_read(&spt->refcount);

        trace_spt_change(spt->vgpu->id, "die", spt,
                        spt->guest_page.track.gfn, spt->shadow_page.type);

        trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

        if (atomic_dec_return(&spt->refcount) > 0)
                return 0;

        if (gtt_type_is_pte_pt(spt->shadow_page.type))
                goto release;

        for_each_present_shadow_entry(spt, &e, index) {
                if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
                        gvt_vgpu_err("GVT doesn't support pse bit for now\n");
                        return -EINVAL;
                }
                ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
                                spt->vgpu, &e);
                if (ret)
                        goto fail;
        }
release:
        trace_spt_change(spt->vgpu->id, "release", spt,
                        spt->guest_page.track.gfn, spt->shadow_page.type);
        ppgtt_free_shadow_page(spt);
        return 0;
fail:
        gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
                        spt, e.val64, e.type);
        return ret;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
                struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *s = NULL;
        struct intel_vgpu_guest_page *g;
        struct intel_vgpu_page_track *t;
        int ret;

        if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
                ret = -EINVAL;
                goto fail;
        }

        t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
        if (t) {
                g = page_track_to_guest_page(t);
                s = guest_page_to_ppgtt_spt(g);
                ppgtt_get_shadow_page(s);
        } else {
                int type = get_next_pt_type(we->type);

                s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
                if (IS_ERR(s)) {
                        ret = PTR_ERR(s);
                        goto fail;
                }

                ret = intel_gvt_hypervisor_enable_page_track(vgpu,
                                &s->guest_page.track);
                if (ret)
                        goto fail;

                ret = ppgtt_populate_shadow_page(s);
                if (ret)
                        goto fail;

                trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
                        s->shadow_page.type);
        }
        return s;
fail:
        gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
                        s, we->val64, we->type);
        return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
                struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
        struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

        se->type = ge->type;
        se->val64 = ge->val64;

        ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_vgpu_ppgtt_spt *s;
        struct intel_gvt_gtt_entry se, ge;
        unsigned long i;
        int ret;

        trace_spt_change(spt->vgpu->id, "born", spt,
                        spt->guest_page.track.gfn, spt->shadow_page.type);

        if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
                for_each_present_guest_entry(spt, &ge, i) {
                        ret = gtt_entry_p2m(vgpu, &ge, &se);
                        if (ret)
                                goto fail;
                        ppgtt_set_shadow_entry(spt, &se, i);
                }
                return 0;
        }

        for_each_present_guest_entry(spt, &ge, i) {
                if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
                        gvt_vgpu_err("GVT doesn't support pse bit now\n");
                        ret = -EINVAL;
                        goto fail;
                }

                s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
                if (IS_ERR(s)) {
                        ret = PTR_ERR(s);
                        goto fail;
                }
                ppgtt_get_shadow_entry(spt, &se, i);
                ppgtt_generate_shadow_entry(&se, s, &ge);
                ppgtt_set_shadow_entry(spt, &se, i);
        }
        return 0;
fail:
        gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
                        spt, ge.val64, ge.type);
        return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
                struct intel_gvt_gtt_entry *se, unsigned long index)
{
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        int ret;

        trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
                         index);

        if (!ops->test_present(se))
                return 0;

        if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
                return 0;

        if (gtt_type_is_pt(get_next_pt_type(se->type))) {
                struct intel_vgpu_ppgtt_spt *s =
                        ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
                if (!s) {
                        gvt_vgpu_err("fail to find guest page\n");
                        ret = -ENXIO;
                        goto fail;
                }
                ret = ppgtt_invalidate_shadow_page(s);
                if (ret)
                        goto fail;
        }
        return 0;
fail:
        gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
                        spt, se->val64, se->type);
        return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
                struct intel_gvt_gtt_entry *we, unsigned long index)
{
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_entry m;
        struct intel_vgpu_ppgtt_spt *s;
        int ret;

        trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
                        we->val64, index);

        if (gtt_type_is_pt(get_next_pt_type(we->type))) {
                s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
                if (IS_ERR(s)) {
                        ret = PTR_ERR(s);
                        goto fail;
                }
                ppgtt_get_shadow_entry(spt, &m, index);
                ppgtt_generate_shadow_entry(&m, s, we);
                ppgtt_set_shadow_entry(spt, &m, index);
        } else {
                ret = gtt_entry_p2m(vgpu, we, &m);
                if (ret)
                        goto fail;
                ppgtt_set_shadow_entry(spt, &m, index);
        }
        return 0;
fail:
        gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
                spt, we->val64, we->type);
        return ret;
}
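
/*
 * Out-of-sync (OOS) page support (a brief overview, inferred from the
 * code below): a write-heavy guest PTE page can have its write
 * protection dropped, with a snapshot of its contents kept in an
 * oos_page. Before the next workload is submitted, the shadow entries
 * are re-synced against the guest page (see sync_oos_page() and
 * intel_vgpu_sync_oos_pages()), trading per-write traps for a deferred
 * batch update.
 */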

static int sync_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *spt =
                guest_page_to_ppgtt_spt(oos_page->guest_page);
        struct intel_gvt_gtt_entry old, new, m;
        int index;
        int ret;

        trace_oos_change(vgpu->id, "sync", oos_page->id,
                        oos_page->guest_page, spt->guest_page_type);

        old.type = new.type = get_entry_type(spt->guest_page_type);
        old.val64 = new.val64 = 0;

        for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
                index++) {
                ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
                ops->get_entry(NULL, &new, index, true,
                        oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);

                if (old.val64 == new.val64
                        && !test_and_clear_bit(index, spt->post_shadow_bitmap))
                        continue;

                trace_oos_sync(vgpu->id, oos_page->id,
                                oos_page->guest_page, spt->guest_page_type,
                                new.val64, index);

                ret = gtt_entry_p2m(vgpu, &new, &m);
                if (ret)
                        return ret;

                ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
                ppgtt_set_shadow_entry(spt, &m, index);
        }

        oos_page->guest_page->write_cnt = 0;
        list_del_init(&spt->post_shadow_list);
        return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_vgpu_ppgtt_spt *spt =
                guest_page_to_ppgtt_spt(oos_page->guest_page);

        trace_oos_change(vgpu->id, "detach", oos_page->id,
                        oos_page->guest_page, spt->guest_page_type);

        oos_page->guest_page->write_cnt = 0;
        oos_page->guest_page->oos_page = NULL;
        oos_page->guest_page = NULL;

        list_del_init(&oos_page->vm_list);
        list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

        return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page,
                struct intel_vgpu_guest_page *gpt)
{
        struct intel_gvt *gvt = vgpu->gvt;
        int ret;

        ret = intel_gvt_hypervisor_read_gpa(vgpu,
                        gpt->track.gfn << GTT_PAGE_SHIFT,
                        oos_page->mem, GTT_PAGE_SIZE);
        if (ret)
                return ret;

        oos_page->guest_page = gpt;
        gpt->oos_page = oos_page;

        list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

        trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
                        gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
        return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *gpt)
{
        int ret;

        ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
        if (ret)
                return ret;

        trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
                        gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

        list_del_init(&gpt->oos_page->vm_list);
        return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *gpt)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
        int ret;

        WARN(oos_page, "shadow PPGTT page already has an oos page\n");

        if (list_empty(&gtt->oos_page_free_list_head)) {
                oos_page = container_of(gtt->oos_page_use_list_head.next,
                        struct intel_vgpu_oos_page, list);
                ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
                if (ret)
                        return ret;
                ret = detach_oos_page(vgpu, oos_page);
                if (ret)
                        return ret;
        } else
                oos_page = container_of(gtt->oos_page_free_list_head.next,
                        struct intel_vgpu_oos_page, list);
        return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *gpt)
{
        struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

        if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
                return -EINVAL;

        trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
                        gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

        list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
        return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_oos_page *oos_page;
        int ret;

        if (!enable_out_of_sync)
                return 0;

        list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
                oos_page = container_of(pos,
                                struct intel_vgpu_oos_page, vm_list);
                ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
                if (ret)
                        return ret;
        }
        return 0;
}

/*
 * The heart of the PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
                struct intel_vgpu_guest_page *gpt,
                struct intel_gvt_gtt_entry *we, unsigned long index)
{
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu *vgpu = spt->vgpu;
        int type = spt->shadow_page.type;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry se;

        int ret;
        int new_present;

        new_present = ops->test_present(we);

        /*
         * Add the new entry first and then remove the old one, so that
         * the ppgtt table stays valid during the window between the two
         * operations.
         */
        ppgtt_get_shadow_entry(spt, &se, index);

        if (new_present) {
                ret = ppgtt_handle_guest_entry_add(gpt, we, index);
                if (ret)
                        goto fail;
        }

        ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
        if (ret)
                goto fail;

        if (!new_present) {
                ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
                ppgtt_set_shadow_entry(spt, &se, index);
        }

        return 0;
fail:
        gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
                        spt, we->val64, we->type);
        return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
        return enable_out_of_sync
                && gtt_type_is_pte_pt(
                        guest_page_to_ppgtt_spt(gpt)->guest_page_type)
                && gpt->write_cnt >= 2;
}

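/*
 * Defer ("post") shadowing for an entry that was only partially written:
 * the index is recorded in post_shadow_bitmap and the entry is
 * re-shadowed by intel_vgpu_flush_post_shadow() before the next workload
 * is submitted.
 */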
static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
                unsigned long index)
{
        set_bit(index, spt->post_shadow_bitmap);
        if (!list_empty(&spt->post_shadow_list))
                return;

        list_add_tail(&spt->post_shadow_list,
                        &spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_ppgtt_spt *spt;
        struct intel_gvt_gtt_entry ge;
        unsigned long index;
        int ret;

        list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
                spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
                                post_shadow_list);

                for_each_set_bit(index, spt->post_shadow_bitmap,
                                GTT_ENTRY_NUM_IN_ONE_PAGE) {
                        ppgtt_get_guest_entry(spt, &ge, index);

                        ret = ppgtt_handle_guest_write_page_table(
                                        &spt->guest_page, &ge, index);
                        if (ret)
                                return ret;
                        clear_bit(index, spt->post_shadow_bitmap);
                }
                list_del_init(&spt->post_shadow_list);
        }
        return 0;
}

static int ppgtt_handle_guest_write_page_table_bytes(
                struct intel_vgpu_guest_page *gpt,
                u64 pa, void *p_data, int bytes)
{
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        struct intel_gvt_gtt_entry we, se;
        unsigned long index;
        int ret;

        index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

        ppgtt_get_guest_entry(spt, &we, index);

        ops->test_pse(&we);

        if (bytes == info->gtt_entry_size) {
                ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
                if (ret)
                        return ret;
        } else {
                if (!test_bit(index, spt->post_shadow_bitmap)) {
                        ppgtt_get_shadow_entry(spt, &se, index);
                        ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
                        if (ret)
                                return ret;
                }

                ppgtt_set_post_shadow(spt, index);
        }

        if (!enable_out_of_sync)
                return 0;

        gpt->write_cnt++;

        if (gpt->oos_page)
                ops->set_entry(gpt->oos_page->mem, &we, index,
                                false, 0, vgpu);

        if (can_do_out_of_sync(gpt)) {
                if (!gpt->oos_page)
                        ppgtt_allocate_oos_page(vgpu, gpt);

                ret = ppgtt_set_guest_page_oos(vgpu, gpt);
                if (ret < 0)
                        return ret;
        }
        return 0;
}

/*
 * mm page table allocation policy for bdw+
 *  - for ggtt, only virtual page table will be allocated.
 *  - for ppgtt, dedicated virtual/shadow page table will be allocated.
 */
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        void *mem;

        if (mm->type == INTEL_GVT_MM_PPGTT) {
                mm->page_table_entry_cnt = 4;
                mm->page_table_entry_size = mm->page_table_entry_cnt *
                        info->gtt_entry_size;
                mem = kzalloc(mm->has_shadow_page_table ?
                        mm->page_table_entry_size * 2
                                : mm->page_table_entry_size, GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;
                mm->virtual_page_table = mem;
                if (!mm->has_shadow_page_table)
                        return 0;
                mm->shadow_page_table = mem + mm->page_table_entry_size;
        } else if (mm->type == INTEL_GVT_MM_GGTT) {
                mm->page_table_entry_cnt =
                        (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
                mm->page_table_entry_size = mm->page_table_entry_cnt *
                        info->gtt_entry_size;
                mem = vzalloc(mm->page_table_entry_size);
                if (!mem)
                        return -ENOMEM;
                mm->virtual_page_table = mem;
        }
        return 0;
}

static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
        if (mm->type == INTEL_GVT_MM_PPGTT) {
                kfree(mm->virtual_page_table);
        } else if (mm->type == INTEL_GVT_MM_GGTT) {
                if (mm->virtual_page_table)
                        vfree(mm->virtual_page_table);
        }
        mm->virtual_page_table = mm->shadow_page_table = NULL;
}

static void invalidate_mm(struct intel_vgpu_mm *mm)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
        struct intel_gvt_gtt_entry se;
        int i;

        if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
                return;

        for (i = 0; i < mm->page_table_entry_cnt; i++) {
                ppgtt_get_shadow_root_entry(mm, &se, i);
                if (!ops->test_present(&se))
                        continue;
                ppgtt_invalidate_shadow_page_by_shadow_entry(
                                vgpu, &se);
                se.val64 = 0;
                ppgtt_set_shadow_root_entry(mm, &se, i);

                trace_gpt_change(vgpu->id, "destroy root pointer",
                                NULL, se.type, se.val64, i);
        }
        mm->shadowed = false;
}

/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: the kref embedded in the mm object to be destroyed
 *
 * This function is used to destroy a mm object for vGPU
 *
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
        struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;

        if (!mm->initialized)
                goto out;

        list_del(&mm->list);
        list_del(&mm->lru_list);

        if (mm->has_shadow_page_table)
                invalidate_mm(mm);

        gtt->mm_free_page_table(mm);
out:
        kfree(mm);
}

static int shadow_mm(struct intel_vgpu_mm *mm)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
        struct intel_vgpu_ppgtt_spt *spt;
        struct intel_gvt_gtt_entry ge, se;
        int i;
        int ret;

        if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
                return 0;

        mm->shadowed = true;

        for (i = 0; i < mm->page_table_entry_cnt; i++) {
                ppgtt_get_guest_root_entry(mm, &ge, i);
                if (!ops->test_present(&ge))
                        continue;

                trace_gpt_change(vgpu->id, __func__, NULL,
                                ge.type, ge.val64, i);

                spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
                if (IS_ERR(spt)) {
                        gvt_vgpu_err("fail to populate guest root pointer\n");
                        ret = PTR_ERR(spt);
                        goto fail;
                }
                ppgtt_generate_shadow_entry(&se, spt, &ge);
                ppgtt_set_shadow_root_entry(mm, &se, i);

                trace_gpt_change(vgpu->id, "populate root pointer",
                                NULL, se.type, se.val64, i);
        }
        return 0;
fail:
        invalidate_mm(mm);
        return ret;
}

1561/**
1562 * intel_vgpu_create_mm - create a mm object for a vGPU
1563 * @vgpu: a vGPU
1564 * @mm_type: mm object type, should be PPGTT or GGTT
1565 * @virtual_page_table: page table root pointers. Could be NULL if user wants
1566 * to populate shadow later.
1567 * @page_table_level: describe the page table level of the mm object
1568 * @pde_base_index: pde root pointer base in GGTT MMIO.
1569 *
1570 * This function is used to create a mm object for a vGPU.
1571 *
1572 * Returns:
1573 * Zero on success, negative error code in pointer if failed.
1574 */
1575struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1576 int mm_type, void *virtual_page_table, int page_table_level,
1577 u32 pde_base_index)
1578{
1579 struct intel_gvt *gvt = vgpu->gvt;
1580 struct intel_gvt_gtt *gtt = &gvt->gtt;
1581 struct intel_vgpu_mm *mm;
1582 int ret;
1583
Jike Song96317392017-01-09 15:38:38 +08001584 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
Zhi Wang2707e442016-03-28 23:23:16 +08001585 if (!mm) {
1586 ret = -ENOMEM;
1587 goto fail;
1588 }
1589
1590 mm->type = mm_type;
1591
1592 if (page_table_level == 1)
1593 mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
1594 else if (page_table_level == 3)
1595 mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1596 else if (page_table_level == 4)
1597 mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1598 else {
1599 WARN_ON(1);
1600 ret = -EINVAL;
1601 goto fail;
1602 }
1603
1604 mm->page_table_level = page_table_level;
1605 mm->pde_base_index = pde_base_index;
1606
1607 mm->vgpu = vgpu;
1608 mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
1609
1610 kref_init(&mm->ref);
1611 atomic_set(&mm->pincount, 0);
1612 INIT_LIST_HEAD(&mm->list);
1613 INIT_LIST_HEAD(&mm->lru_list);
1614 list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
1615
1616 ret = gtt->mm_alloc_page_table(mm);
1617 if (ret) {
Tina Zhang695fbc02017-03-10 04:26:53 -05001618 gvt_vgpu_err("fail to allocate page table for mm\n");
Zhi Wang2707e442016-03-28 23:23:16 +08001619 goto fail;
1620 }
1621
1622 mm->initialized = true;
1623
1624 if (virtual_page_table)
1625 memcpy(mm->virtual_page_table, virtual_page_table,
1626 mm->page_table_entry_size);
1627
1628 if (mm->has_shadow_page_table) {
1629 ret = shadow_mm(mm);
1630 if (ret)
1631 goto fail;
1632 list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
1633 }
1634 return mm;
1635fail:
Tina Zhang695fbc02017-03-10 04:26:53 -05001636 gvt_vgpu_err("fail to create mm\n");
Zhi Wang2707e442016-03-28 23:23:16 +08001637 if (mm)
1638 intel_gvt_mm_unreference(mm);
1639 return ERR_PTR(ret);
1640}
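
/*
 * Example (illustrative sketch, not part of the original source): creating
 * a 4-level PPGTT mm object from guest root pointers. "pdps" is a
 * hypothetical array standing in for root entries already read from the
 * guest; intel_vgpu_g2v_create_ppgtt_mm() below shows where these pointers
 * really come from. Note the ERR_PTR()-style return convention:
 *
 *	u64 pdps[4];
 *	struct intel_vgpu_mm *mm;
 *
 *	mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, pdps, 4, 0);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 */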

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user is done using a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return;

	atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return 0;

	if (!mm->shadowed) {
		ret = shadow_mm(mm);
		if (ret)
			return ret;
	}

	atomic_inc(&mm->pincount);
	list_del_init(&mm->lru_list);
	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
	return 0;
}
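
/*
 * Example (illustrative sketch, not part of the original source): the
 * expected pin/unpin bracket around using a PPGTT mm, e.g. while a
 * workload is scanned. Pinning lazily populates the shadow page table
 * and keeps reclaim_one_mm() below from invalidating the mm; "mm" is a
 * hypothetical PPGTT mm object held by the caller:
 *
 *	ret = intel_vgpu_pin_mm(mm);
 *	if (ret)
 *		return ret;
 *	// ... translate addresses via intel_vgpu_gma_to_gpa(mm, gma) ...
 *	intel_vgpu_unpin_mm(mm);
 */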

static int reclaim_one_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, lru_list);

		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;
		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->lru_list);
		invalidate_mm(mm);
		return 1;
	}
	return 0;
}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	if (WARN_ON(!mm->has_shadow_page_table))
		return -EINVAL;

	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}

/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object, can be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a
 * specific graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, index;
	int ret;

	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
		return INTEL_GVT_INVALID_ADDR;

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ret = ggtt_get_guest_entry(mm, &e,
				gma_ops->gma_to_ggtt_pte_index(gma));
		if (ret)
			goto err;
		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
			+ (gma & ~GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
		return gpa;
	}

	switch (mm->page_table_level) {
	case 4:
		ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
		gma_index[2] = gma_ops->gma_to_pde_index(gma);
		gma_index[3] = gma_ops->gma_to_pte_index(gma);
		index = 4;
		break;
	case 3:
		ret = ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_l3_pdp_index(gma));
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pde_index(gma);
		gma_index[1] = gma_ops->gma_to_pte_index(gma);
		index = 2;
		break;
	case 2:
		ret = ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_pde_index(gma));
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pte_index(gma);
		index = 1;
		break;
	default:
		WARN_ON(1);
		goto err;
	}

	/* walk into the shadow page table and get gpa from guest entry */
	for (i = 0; i < index; i++) {
		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == index - 1));
		if (ret)
			goto err;

		if (!pte_ops->test_present(&e)) {
			gvt_dbg_core("GMA 0x%lx is not present\n", gma);
			goto err;
		}
	}

	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
		+ (gma & ~GTT_PAGE_MASK);

	trace_gma_translate(vgpu->id, "ppgtt", 0,
			mm->page_table_level, gma, gpa);
	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
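
/*
 * Example (illustrative sketch, not part of the original source): a caller
 * such as a command scanner resolving a guest address. For a GGTT mm the
 * walk is a single PTE lookup; for a 4-level PPGTT it visits
 * PML4E -> PDPE -> PDE -> PTE. The low 12 bits of the gma are carried
 * over as the page offset:
 *
 *	unsigned long gpa;
 *
 *	gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, gma);
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EFAULT;
 *	// e.g. gma 0x00401abc with PTE pfn 0x1234 yields
 *	// gpa = (0x1234 << GTT_PAGE_SHIFT) + 0xabc = 0x1234abc
 */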

static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}
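
/*
 * Example (illustrative sketch, not part of the original source): how a
 * 4-byte guest read maps onto an 8-byte GGTT entry. With
 * gtt_entry_size == 8 (gtt_entry_size_shift == 3), a read at
 * gtt_start_offset + 0x104 becomes off = 0x104, so index = 0x104 >> 3 =
 * 0x20 and the copy starts at byte (0x104 & 7) = 4, i.e. the upper half
 * of entry 0x20:
 *
 *	u32 val;
 *
 *	ret = intel_vgpu_emulate_gtt_mmio_read(vgpu,
 *			info->gtt_start_offset + 0x104, &val, 4);
 */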

static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e, m;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	if (ops->test_present(&e)) {
		ret = gtt_entry_p2m(vgpu, &e, &m);
		if (ret) {
			gvt_vgpu_err("fail to translate guest gtt entry\n");
			/* The guest may read/write the entry while it is
			 * only partially updated; p2m fails in that case,
			 * so point the shadow entry at a scratch page
			 * instead.
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
		}
	} else {
		m = e;
		ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
	}

	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
	gtt_invalidate(gvt->dev_priv);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}
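
/*
 * Example (illustrative sketch, not part of the original source): a 32-bit
 * guest updating one 8-byte GGTT entry (index 0x20, at byte offset 0x100)
 * as two 4-byte writes; "lo" and "hi" are hypothetical u32 halves of the
 * PTE. Between the two writes the entry is only partially updated, which
 * is exactly the window the scratch-page fallback above covers:
 *
 *	intel_vgpu_emulate_gtt_mmio_write(vgpu,
 *			info->gtt_start_offset + 0x100, &lo, 4);
 *	intel_vgpu_emulate_gtt_mmio_write(vgpu,
 *			info->gtt_start_offset + 0x104, &hi, 4);
 */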

/**
 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by filling each scratch pt with entries that point
	 * to the next-level scratch pt or scratch page. scratch_pt[type]
	 * indicates the scratch pt/scratch page used by page tables of
	 * 'type', e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
	 * GTT_TYPE_PPGTT_PDE_PT level pt; that scratch_pt itself is of type
	 * GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters (present/writeable/cache type) are
		 * set the same as in i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
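
/*
 * Illustrative sketch (an inference from the fill loop above, not part of
 * the original source): once create_scratch_page_tree() below has run,
 * the per-type scratch pages chain downward, so any stray walk that lands
 * in a scratch table terminates on zeroed memory:
 *
 *	scratch_pt[PML4_PT] -> scratch_pt[PDP_PT] -> scratch_pt[PDE_PT]
 *		-> scratch_pt[PTE_PT] (zeroed page, no entry present)
 */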

static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory
 * virtualization components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_vgpu_mm *ggtt_mm;

	hash_init(gtt->tracked_guest_page_hash_table);
	hash_init(gtt->shadow_page_hash_table);

	INIT_LIST_HEAD(&gtt->mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	intel_vgpu_reset_ggtt(vgpu);

	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
			NULL, 1, 0);
	if (IS_ERR(ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(ggtt_mm);
	}

	gtt->ggtt_mm = ggtt_mm;

	return create_scratch_page_tree(vgpu);
}

static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type == type) {
			vgpu->gvt->gtt.mm_free_page_table(mm);
			list_del(&mm->list);
			list_del(&mm->lru_list);
			kfree(mm);
		}
	}
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory
 * virtualization components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	ppgtt_free_all_shadow_page(vgpu);
	release_scratch_page_tree(vgpu);

	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 * @root_entry: PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from mm object pool
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry)
{
	struct list_head *pos;
	struct intel_vgpu_mm *mm;
	u64 *src, *dst;

	list_for_each(pos, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;

		if (mm->page_table_level != page_table_level)
			continue;

		src = root_entry;
		dst = mm->virtual_page_table;

		if (page_table_level == 3) {
			if (src[0] == dst[0]
					&& src[1] == dst[1]
					&& src[2] == dst[2]
					&& src[3] == dst[3])
				return mm;
		} else {
			if (src[0] == dst[0])
				return mm;
		}
	}
	return NULL;
}

/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_vgpu_err("fail to create mm\n");
			return PTR_ERR(mm);
		}
	}
	return 0;
}
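
/*
 * Example (illustrative sketch, not part of the original source): how the
 * g2v handshake is expected to reach this function. The guest i915 driver
 * writes its root pointers into vgtif_reg(pdp[0..3]) and then writes a
 * notification code to vgtif_reg(g2v_notify); the MMIO handler (in
 * handlers.c, an assumption here) dispatches on that code:
 *
 *	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
 *		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
 *		break;
 *	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
 *		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
 *		break;
 */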

/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_gvt_mm_unreference(mm);
	return 0;
}

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv)) {
		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
	} else {
		return -ENODEV;
	}

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}
	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_ggtt_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
	return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
					GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_ggtt_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}

/**
 * intel_vgpu_reset_ggtt - reset the GGTT entry
 * @vgpu: a vGPU
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	u32 index;
	u32 offset;
	u32 num_entries;
	struct intel_gvt_gtt_entry e;

	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
	e.type = GTT_TYPE_GGTT_PTE;
	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
	e.val64 |= _PAGE_PRESENT;

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	gtt_invalidate(dev_priv);
}
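
/*
 * Example (illustrative sketch, not part of the original source): the
 * entry-count arithmetic above for a hypothetical vGPU with a 128 MB
 * aperture based at GMA 0x10000000; each GGTT entry covers one 4 KB page:
 *
 *	index       = 0x10000000 >> PAGE_SHIFT;		// 0x10000
 *	num_entries = (128 << 20) >> PAGE_SHIFT;	// 32768 scratch PTEs
 */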

/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all GTT-related
 * status, including GGTT, PPGTT and the scratch pages.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	int i;

	ppgtt_free_all_shadow_page(vgpu);

	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);

	intel_vgpu_reset_ggtt(vgpu);

	/* clear scratch page for security */
	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL)
			memset(page_address(vgpu->gtt.scratch_pt[i].page),
				0, PAGE_SIZE);
	}
}