/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * Validate a gm address and the size of the range starting at it; the
 * actual translation to a host gm address is done by the gmadr_g2h
 * helpers below.
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
			"invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
			"invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}
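
/*
 * Illustrative sketch (not compiled): the two helpers above are exact
 * inverses of each other. A guest address in the aperture is rebased
 * from the vGPU's guest-visible aperture window onto the slice of the
 * host aperture assigned to it; the hidden (high) range is rebased the
 * same way:
 *
 *	u64 h, g;
 *
 *	intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h);
 *	intel_gvt_ggtt_gmadr_h2g(vgpu, h, &g);
 *	g == g_addr for any g_addr that passes the validity check
 */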

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
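
/*
 * Illustrative sketch (not compiled): a GGTT entry index is just a gm
 * address shifted right by I915_GTT_PAGE_SHIFT, so index translation
 * reuses the gmadr helpers above:
 *
 *	unsigned long h_index;
 *
 *	if (!intel_gvt_ggtt_index_g2h(vgpu, g_index, &h_index))
 *		h_index now names the same page in the host GGTT
 */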

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - type of the next-level page table
 * - type of an entry inside this level of page table
 * - type of the entry when the PSE bit is set
 *
 * If the given type doesn't carry a piece of information, the lookup
 * returns GTT_TYPE_INVALID. For example, asking for the PSE type of an
 * L4 root entry, or for the next-level page table type of a PTE page
 * table, both return GTT_TYPE_INVALID, since an L4 root entry has no
 * PSE bit and a PTE page table has no next level. This is useful when
 * traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
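
/*
 * Illustrative sketch (not compiled): one step of a top-down page walk
 * driven purely by the type table above, starting from an L4 root:
 *
 *	int pt = get_next_pt_type(GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
 *		pt == GTT_TYPE_PPGTT_PML4_PT
 *	int e = get_entry_type(pt);
 *		e == GTT_TYPE_PPGTT_PML4_ENTRY
 *	int pse = get_pse_type(e);
 *		pse == GTT_TYPE_INVALID, a PML4 entry has no PSE bit
 *
 * GTT_TYPE_INVALID is what terminates the walk once a PTE page table
 * is reached.
 */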

static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}
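
/*
 * Illustrative sketch (not compiled): with GTT_HAW == 46, ADDR_4K_MASK
 * covers address bits [45:12], so for a 4K entry the pair round-trips
 * while leaving the flag bits below bit 12 (present, PSE, ...) intact:
 *
 *	e.val64 = _PAGE_PRESENT;
 *	gen8_gtt_set_pfn(&e, 0x12345);
 *	gen8_gtt_get_pfn(&e) == 0x12345, and e.val64 still has
 *	_PAGE_PRESENT set
 */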

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & _PAGE_PSE))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes the PDP root pointer registers without the present
	 * bit set, and that still works, so root pointer entries need to
	 * be treated specially here.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
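
/*
 * Illustrative sketch (not compiled): the gen8 48-bit GMA layout the
 * macros above decode, 9 bits per level plus a 12-bit page offset:
 *
 *	47......39 38......30 29......21 20......12 11........0
 *	[  pml4  ] [ l4_pdp ] [  pde   ] [  pte   ] [ in page ]
 *
 *	unsigned long gma = (1UL << 39) | (2UL << 30) | (3UL << 21)
 *			    | (4UL << 12) | 0x5;
 *	gen8_gma_to_pml4_index(gma)   == 1
 *	gen8_gma_to_l4_pdp_index(gma) == 2
 *	gen8_gma_to_pde_index(gma)    == 3
 *	gen8_gma_to_pte_index(gma)    == 4
 */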

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);

	pte_ops->test_pse(entry);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
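
/*
 * Illustrative sketch (not compiled): the guest/shadow macro pairs
 * above differ only in where the entry lives. A guest entry is fetched
 * through the hypervisor (page_table == NULL and guest == true, so
 * gtt_get_entry64() issues a GPA read), while a shadow entry is read
 * directly from the shadow page's kernel mapping:
 *
 *	struct intel_gvt_gtt_entry ge, se;
 *
 *	ppgtt_get_guest_entry(spt, &ge, i);	reads guest memory
 *	ppgtt_get_shadow_entry(spt, &se, i);	reads shadow_page.vaddr
 */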

#define page_track_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page.track)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

/**
 * intel_vgpu_find_tracked_page - find a tracked guest page
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation layer wants to figure out if a
 * trapped GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to page track data structure, NULL if not found.
 */
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *t;

	hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
			t, node, gfn) {
		if (t->gfn == gfn)
			return t;
	}
	return NULL;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);
	if (!hlist_unhashed(&spt->node))
		hash_del(&spt->node);

	if (spt->guest_page.oos_page)
		detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

	if (!hlist_unhashed(&spt->guest_page.track.node))
		hash_del(&spt->guest_page.track.node);

	if (spt->guest_page.track.tracked)
		intel_gvt_hypervisor_disable_page_track(spt->vgpu,
				&spt->guest_page.track);

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_ppgtt_spt *spt;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, spt, node)
		ppgtt_free_shadow_page(spt);
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *data, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_page_track *t = data;
	struct intel_vgpu_ppgtt_spt *spt = page_track_to_ppgtt_spt(t);

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!t->tracked)
		return -EINVAL;

	return ppgtt_handle_guest_write_page_table_bytes(spt,
			pa, p_data, bytes);
}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_tracked_page(vgpu, gfn);
	if (track)
		return page_track_to_ppgtt_spt(track);

	return NULL;
}

/* Find the spt by shadow page mfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_ppgtt_spt *spt;

	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table, spt, node, mfn) {
		if (spt->shadow_page.mfn == mfn)
			return spt;
	}
	return NULL;
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		free_spt(spt);
		return ERR_PTR(-EINVAL);
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	/*
	 * Init guest_page.
	 */
	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;

	spt->guest_page.track.gfn = gfn;
	spt->guest_page.track.handler = ppgtt_write_protection_handler;
	hash_add(vgpu->gtt.tracked_guest_page_hash_table,
		 &spt->guest_page.track.node, gfn);

	INIT_HLIST_NODE(&spt->node);
	hash_add(vgpu->gtt.shadow_page_hash_table, &spt->node, spt->shadow_page.mfn);

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
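
/*
 * Illustrative sketch (not compiled): with the 8-byte gen8 entry size,
 * pt_entries(spt) is 4096 / 8 == 512, so the iterators above visit up
 * to 512 slots and run their body only for entries that both read back
 * successfully and pass the present test:
 *
 *	struct intel_gvt_gtt_entry e;
 *	unsigned long i;
 *
 *	for_each_present_guest_entry(spt, &e, i)
 *		handle_entry(&e, i);	hypothetical callback
 */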

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_shadow_page(s);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	if (gtt_type_is_pte_pt(spt->shadow_page.type))
		goto release;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 2M/1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
release:
	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt)
		ppgtt_get_shadow_page(spt);
	else {
		int type = get_next_pt_type(we->type);

		spt = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto fail;
		}

		ret = intel_gvt_hypervisor_enable_page_track(vgpu, &spt->guest_page.track);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(spt);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn, mfn;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR)
		return -ENXIO;

	pte_ops->set_pfn(&se, mfn);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.track.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
			       spt, spt->guest_page.type,
			       new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_gvt_hypervisor_enable_page_track(spt->vgpu, &spt->guest_page.track);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
		ret = detach_oos_page(spt->vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(oos_page, spt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_disable_page_track(spt->vgpu, &spt->guest_page.track);
}
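
/*
 * Out-of-sync life cycle, summarized (illustrative, not compiled):
 *
 *	in sync (write-protected)
 *	    second trapped write to a PTE page table
 *		-> ppgtt_allocate_oos_page() + ppgtt_set_guest_page_oos():
 *		   guest copy cached in oos_page->mem, write protection
 *		   dropped, page queued on oos_page_list_head
 *	    workload submission (intel_vgpu_sync_oos_pages())
 *		-> ppgtt_set_guest_page_sync(): tracking re-armed, and the
 *		   entries that changed replayed through sync_oos_page()
 */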

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of the PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int ret;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one, so that
	 * the ppgtt table stays valid during the window between the two
	 * operations.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(spt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &old_se, index);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(spt->guest_page.type)
		&& spt->guest_page.write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}
1378
Zhi Wang7d1e5cd2017-09-29 02:47:55 +08001379static int ppgtt_handle_guest_write_page_table_bytes(
Changbin Du44b46732018-01-30 19:19:49 +08001380 struct intel_vgpu_ppgtt_spt *spt,
Zhi Wang2707e442016-03-28 23:23:16 +08001381 u64 pa, void *p_data, int bytes)
1382{
Zhi Wang2707e442016-03-28 23:23:16 +08001383 struct intel_vgpu *vgpu = spt->vgpu;
1384 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1385 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
Tina Zhang6b3816d2017-08-14 15:24:14 +08001386 struct intel_gvt_gtt_entry we, se;
Zhi Wang2707e442016-03-28 23:23:16 +08001387 unsigned long index;
1388 int ret;
1389
1390 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1391
1392 ppgtt_get_guest_entry(spt, &we, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001393
1394 ops->test_pse(&we);
1395
1396 if (bytes == info->gtt_entry_size) {
Changbin Du44b46732018-01-30 19:19:49 +08001397 ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001398 if (ret)
1399 return ret;
1400 } else {
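		/*
		 * A partial write may leave the guest entry transiently
		 * invalid, so don't shadow it immediately: point the shadow
		 * entry at the scratch page table and defer the real update
		 * to the post-shadow flush before workload submission.
		 */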
Zhi Wang2707e442016-03-28 23:23:16 +08001401 if (!test_bit(index, spt->post_shadow_bitmap)) {
Zhi Wang121d760d2017-12-29 02:50:08 +08001402 int type = spt->shadow_page.type;
1403
Tina Zhang6b3816d2017-08-14 15:24:14 +08001404 ppgtt_get_shadow_entry(spt, &se, index);
Changbin Du44b46732018-01-30 19:19:49 +08001405 ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001406 if (ret)
1407 return ret;
Zhi Wang121d760d2017-12-29 02:50:08 +08001408 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1409 ppgtt_set_shadow_entry(spt, &se, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001410 }
Zhi Wang2707e442016-03-28 23:23:16 +08001411 ppgtt_set_post_shadow(spt, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001412 }
1413
1414 if (!enable_out_of_sync)
1415 return 0;
1416
Changbin Du44b46732018-01-30 19:19:49 +08001417 spt->guest_page.write_cnt++;
Zhi Wang2707e442016-03-28 23:23:16 +08001418
Changbin Du44b46732018-01-30 19:19:49 +08001419 if (spt->guest_page.oos_page)
1420 ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
Zhi Wang2707e442016-03-28 23:23:16 +08001421 false, 0, vgpu);
1422
Changbin Du44b46732018-01-30 19:19:49 +08001423 if (can_do_out_of_sync(spt)) {
1424 if (!spt->guest_page.oos_page)
1425 ppgtt_allocate_oos_page(spt);
Zhi Wang2707e442016-03-28 23:23:16 +08001426
Changbin Du44b46732018-01-30 19:19:49 +08001427 ret = ppgtt_set_guest_page_oos(spt);
Zhi Wang2707e442016-03-28 23:23:16 +08001428 if (ret < 0)
1429 return ret;
1430 }
1431 return 0;
1432}
1433
Changbin Duede9d0c2018-01-30 19:19:40 +08001434static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
Zhi Wang2707e442016-03-28 23:23:16 +08001435{
1436 struct intel_vgpu *vgpu = mm->vgpu;
1437 struct intel_gvt *gvt = vgpu->gvt;
1438 struct intel_gvt_gtt *gtt = &gvt->gtt;
1439 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1440 struct intel_gvt_gtt_entry se;
Changbin Duede9d0c2018-01-30 19:19:40 +08001441 int index;
Zhi Wang2707e442016-03-28 23:23:16 +08001442
Changbin Duede9d0c2018-01-30 19:19:40 +08001443 if (!mm->ppgtt_mm.shadowed)
Zhi Wang2707e442016-03-28 23:23:16 +08001444 return;
1445
Changbin Duede9d0c2018-01-30 19:19:40 +08001446 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1447 ppgtt_get_shadow_root_entry(mm, &se, index);
1448
Zhi Wang2707e442016-03-28 23:23:16 +08001449 if (!ops->test_present(&se))
1450 continue;
Changbin Duede9d0c2018-01-30 19:19:40 +08001451
1452 ppgtt_invalidate_shadow_page_by_shadow_entry(vgpu, &se);
Zhi Wang2707e442016-03-28 23:23:16 +08001453 se.val64 = 0;
Changbin Duede9d0c2018-01-30 19:19:40 +08001454 ppgtt_set_shadow_root_entry(mm, &se, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001455
Changbin Du44b46732018-01-30 19:19:49 +08001456 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1457 NULL, se.type, se.val64, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001458 }
Changbin Duede9d0c2018-01-30 19:19:40 +08001459
1460 mm->ppgtt_mm.shadowed = false;
Zhi Wang2707e442016-03-28 23:23:16 +08001461}
1462
Zhi Wang2707e442016-03-28 23:23:16 +08001463
Changbin Duede9d0c2018-01-30 19:19:40 +08001464static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
Zhi Wang2707e442016-03-28 23:23:16 +08001465{
1466 struct intel_vgpu *vgpu = mm->vgpu;
1467 struct intel_gvt *gvt = vgpu->gvt;
1468 struct intel_gvt_gtt *gtt = &gvt->gtt;
1469 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1470 struct intel_vgpu_ppgtt_spt *spt;
1471 struct intel_gvt_gtt_entry ge, se;
Changbin Duede9d0c2018-01-30 19:19:40 +08001472 int index, ret;
Zhi Wang2707e442016-03-28 23:23:16 +08001473
Changbin Duede9d0c2018-01-30 19:19:40 +08001474 if (mm->ppgtt_mm.shadowed)
Zhi Wang2707e442016-03-28 23:23:16 +08001475 return 0;
1476
Changbin Duede9d0c2018-01-30 19:19:40 +08001477 mm->ppgtt_mm.shadowed = true;
Zhi Wang2707e442016-03-28 23:23:16 +08001478
Changbin Duede9d0c2018-01-30 19:19:40 +08001479 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1480 ppgtt_get_guest_root_entry(mm, &ge, index);
1481
Zhi Wang2707e442016-03-28 23:23:16 +08001482 if (!ops->test_present(&ge))
1483 continue;
1484
Changbin Du44b46732018-01-30 19:19:49 +08001485 trace_spt_guest_change(vgpu->id, __func__, NULL,
1486 ge.type, ge.val64, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001487
1488 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
1489 if (IS_ERR(spt)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05001490 gvt_vgpu_err("fail to populate guest root pointer\n");
Zhi Wang2707e442016-03-28 23:23:16 +08001491 ret = PTR_ERR(spt);
1492 goto fail;
1493 }
1494 ppgtt_generate_shadow_entry(&se, spt, &ge);
Changbin Duede9d0c2018-01-30 19:19:40 +08001495 ppgtt_set_shadow_root_entry(mm, &se, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001496
Changbin Du44b46732018-01-30 19:19:49 +08001497 trace_spt_guest_change(vgpu->id, "populate root pointer",
1498 NULL, se.type, se.val64, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001499 }
Changbin Duede9d0c2018-01-30 19:19:40 +08001500
Zhi Wang2707e442016-03-28 23:23:16 +08001501 return 0;
1502fail:
Changbin Duede9d0c2018-01-30 19:19:40 +08001503 invalidate_ppgtt_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001504 return ret;
1505}
1506
Changbin Duede9d0c2018-01-30 19:19:40 +08001507static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1508{
1509 struct intel_vgpu_mm *mm;
1510
1511 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1512 if (!mm)
1513 return NULL;
1514
1515 mm->vgpu = vgpu;
1516 kref_init(&mm->ref);
1517 atomic_set(&mm->pincount, 0);
1518
1519 return mm;
1520}
1521
1522static void vgpu_free_mm(struct intel_vgpu_mm *mm)
1523{
1524 kfree(mm);
1525}
1526
Zhi Wang2707e442016-03-28 23:23:16 +08001527/**
Changbin Duede9d0c2018-01-30 19:19:40 +08001528 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
Zhi Wang2707e442016-03-28 23:23:16 +08001529 * @vgpu: a vGPU
Changbin Duede9d0c2018-01-30 19:19:40 +08001530 * @root_entry_type: ppgtt root entry type
1531 * @pdps: guest pdps.
Zhi Wang2707e442016-03-28 23:23:16 +08001532 *
Changbin Duede9d0c2018-01-30 19:19:40 +08001533 * This function is used to create a ppgtt mm object for a vGPU.
Zhi Wang2707e442016-03-28 23:23:16 +08001534 *
1535 * Returns:
1536 * The new mm object on success, an ERR_PTR-encoded error code if failed.
1537 */
Changbin Duede9d0c2018-01-30 19:19:40 +08001538struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1539 intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
Zhi Wang2707e442016-03-28 23:23:16 +08001540{
1541 struct intel_gvt *gvt = vgpu->gvt;
Zhi Wang2707e442016-03-28 23:23:16 +08001542 struct intel_vgpu_mm *mm;
1543 int ret;
1544
Changbin Duede9d0c2018-01-30 19:19:40 +08001545 mm = vgpu_alloc_mm(vgpu);
1546 if (!mm)
1547 return ERR_PTR(-ENOMEM);
Zhi Wang2707e442016-03-28 23:23:16 +08001548
Changbin Duede9d0c2018-01-30 19:19:40 +08001549 mm->type = INTEL_GVT_MM_PPGTT;
Zhi Wang2707e442016-03-28 23:23:16 +08001550
Changbin Duede9d0c2018-01-30 19:19:40 +08001551 GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1552 root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1553 mm->ppgtt_mm.root_entry_type = root_entry_type;
Zhi Wang2707e442016-03-28 23:23:16 +08001554
Changbin Duede9d0c2018-01-30 19:19:40 +08001555 INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1556 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
Zhi Wang2707e442016-03-28 23:23:16 +08001557
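	/* An L4 table has a single pml4 root in pdps[0]; an L3 table
	 * carries a full set of pdp root pointers.
	 */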
Changbin Duede9d0c2018-01-30 19:19:40 +08001558 if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1559 mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1560 else
1561 memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1562 sizeof(mm->ppgtt_mm.guest_pdps));
Zhi Wang2707e442016-03-28 23:23:16 +08001563
Changbin Duede9d0c2018-01-30 19:19:40 +08001564 ret = shadow_ppgtt_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001565 if (ret) {
Changbin Duede9d0c2018-01-30 19:19:40 +08001566 gvt_vgpu_err("failed to shadow ppgtt mm\n");
1567 vgpu_free_mm(mm);
1568 return ERR_PTR(ret);
Zhi Wang2707e442016-03-28 23:23:16 +08001569 }
1570
Changbin Duede9d0c2018-01-30 19:19:40 +08001571 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1572 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
Zhi Wang2707e442016-03-28 23:23:16 +08001573 return mm;
Changbin Duede9d0c2018-01-30 19:19:40 +08001574}
1575
1576static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1577{
1578 struct intel_vgpu_mm *mm;
1579 unsigned long nr_entries;
1580
1581 mm = vgpu_alloc_mm(vgpu);
1582 if (!mm)
1583 return ERR_PTR(-ENOMEM);
1584
1585 mm->type = INTEL_GVT_MM_GGTT;
1586
1587 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1588 mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
1589 vgpu->gvt->device_info.gtt_entry_size);
1590 if (!mm->ggtt_mm.virtual_ggtt) {
1591 vgpu_free_mm(mm);
1592 return ERR_PTR(-ENOMEM);
1593 }
1594
1595 return mm;
1596}
1597
1598/**
Changbin Du1bc25852018-01-30 19:19:41 +08001599 * _intel_vgpu_mm_release - destroy a mm object
Changbin Duede9d0c2018-01-30 19:19:40 +08001600 * @mm_ref: a kref object
1601 *
1602 * This function is used to destroy a mm object for a vGPU
1603 *
1604 */
Changbin Du1bc25852018-01-30 19:19:41 +08001605void _intel_vgpu_mm_release(struct kref *mm_ref)
Changbin Duede9d0c2018-01-30 19:19:40 +08001606{
1607 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
1608
1609 if (GEM_WARN_ON(atomic_read(&mm->pincount)))
1610 gvt_err("vgpu mm pin count bug detected\n");
1611
1612 if (mm->type == INTEL_GVT_MM_PPGTT) {
1613 list_del(&mm->ppgtt_mm.list);
1614 list_del(&mm->ppgtt_mm.lru_list);
1615 invalidate_ppgtt_mm(mm);
1616 } else {
1617 vfree(mm->ggtt_mm.virtual_ggtt);
1618 }
1619
1620 vgpu_free_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001621}
1622
1623/**
1624 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
1625 * @mm: a vGPU mm object
1626 *
1627 * This function is called when a user no longer wants to use a vGPU mm object
1628 */
1629void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1630{
Zhi Wang2707e442016-03-28 23:23:16 +08001631 atomic_dec(&mm->pincount);
1632}
1633
1634/**
1635 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
1636 * @mm: a vGPU mm object
1637 *
1638 * This function is called when a user wants to use a vGPU mm object. If this
1639 * mm object hasn't been shadowed yet, the shadow will be populated at this
1640 * time.
1641 *
1642 * Returns:
1643 * Zero on success, negative error code if failed.
1644 */
1645int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1646{
1647 int ret;
1648
Changbin Duede9d0c2018-01-30 19:19:40 +08001649 atomic_inc(&mm->pincount);
Zhi Wang2707e442016-03-28 23:23:16 +08001650
Changbin Duede9d0c2018-01-30 19:19:40 +08001651 if (mm->type == INTEL_GVT_MM_PPGTT) {
1652 ret = shadow_ppgtt_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001653 if (ret)
1654 return ret;
Changbin Duede9d0c2018-01-30 19:19:40 +08001655
1656 list_move_tail(&mm->ppgtt_mm.lru_list,
1657 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
1658
Zhi Wang2707e442016-03-28 23:23:16 +08001659 }
1660
Zhi Wang2707e442016-03-28 23:23:16 +08001661 return 0;
1662}
1663
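/*
 * Walk the global LRU list and invalidate the shadow tables of the
 * least-recently-used unpinned PPGTT mm. Returns 1 if one mm was
 * reclaimed, 0 if none was eligible.
 */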
Changbin Duede9d0c2018-01-30 19:19:40 +08001664static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
Zhi Wang2707e442016-03-28 23:23:16 +08001665{
1666 struct intel_vgpu_mm *mm;
1667 struct list_head *pos, *n;
1668
Changbin Duede9d0c2018-01-30 19:19:40 +08001669 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
1670 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
Zhi Wang2707e442016-03-28 23:23:16 +08001671
Zhi Wang2707e442016-03-28 23:23:16 +08001672 if (atomic_read(&mm->pincount))
1673 continue;
1674
Changbin Duede9d0c2018-01-30 19:19:40 +08001675 list_del_init(&mm->ppgtt_mm.lru_list);
1676 invalidate_ppgtt_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001677 return 1;
1678 }
1679 return 0;
1680}
1681
1682/*
1683 * GMA translation APIs.
1684 */
1685static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
1686 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
1687{
1688 struct intel_vgpu *vgpu = mm->vgpu;
1689 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1690 struct intel_vgpu_ppgtt_spt *s;
1691
Changbin Du44b46732018-01-30 19:19:49 +08001692 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
Zhi Wang2707e442016-03-28 23:23:16 +08001693 if (!s)
1694 return -ENXIO;
1695
1696 if (!guest)
1697 ppgtt_get_shadow_entry(s, e, index);
1698 else
1699 ppgtt_get_guest_entry(s, e, index);
1700 return 0;
1701}
1702
1703/**
1704 * intel_vgpu_gma_to_gpa - translate a gma to GPA
1705 * @mm: mm object; it could be a PPGTT or GGTT mm object
1706 * @gma: graphics memory address in this mm object
1707 *
1708 * This function is used to translate a graphics memory address in a
1709 * specific graphics memory space to a guest physical address.
1710 *
1711 * Returns:
1712 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
1713 */
1714unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
1715{
1716 struct intel_vgpu *vgpu = mm->vgpu;
1717 struct intel_gvt *gvt = vgpu->gvt;
1718 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
1719 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
1720 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
1721 unsigned long gma_index[4];
1722 struct intel_gvt_gtt_entry e;
Changbin Duede9d0c2018-01-30 19:19:40 +08001723 int i, levels = 0;
Zhi Wang2707e442016-03-28 23:23:16 +08001724 int ret;
1725
Changbin Duede9d0c2018-01-30 19:19:40 +08001726 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
1727 mm->type != INTEL_GVT_MM_PPGTT);
Zhi Wang2707e442016-03-28 23:23:16 +08001728
1729 if (mm->type == INTEL_GVT_MM_GGTT) {
1730 if (!vgpu_gmadr_is_valid(vgpu, gma))
1731 goto err;
1732
Changbin Duede9d0c2018-01-30 19:19:40 +08001733 ggtt_get_guest_entry(mm, &e,
1734 gma_ops->gma_to_ggtt_pte_index(gma));
1735
Zhi Wang9556e112017-10-10 13:51:32 +08001736 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
1737 + (gma & ~I915_GTT_PAGE_MASK);
Zhi Wang2707e442016-03-28 23:23:16 +08001738
1739 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
Changbin Duede9d0c2018-01-30 19:19:40 +08001740 } else {
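		/*
		 * PPGTT walk: slice the gma into per-level indexes and
		 * descend the shadow tree. For a 4-level (48-bit) table the
		 * slices are gma[47:39] = pml4e, gma[38:30] = pdpe,
		 * gma[29:21] = pde and gma[20:12] = pte; the low 12 bits
		 * remain the in-page offset.
		 */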
1741 switch (mm->ppgtt_mm.root_entry_type) {
1742 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
1743 ppgtt_get_shadow_root_entry(mm, &e, 0);
Zhi Wang2707e442016-03-28 23:23:16 +08001744
Changbin Duede9d0c2018-01-30 19:19:40 +08001745 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
1746 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
1747 gma_index[2] = gma_ops->gma_to_pde_index(gma);
1748 gma_index[3] = gma_ops->gma_to_pte_index(gma);
1749 levels = 4;
1750 break;
1751 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
1752 ppgtt_get_shadow_root_entry(mm, &e,
1753 gma_ops->gma_to_l3_pdp_index(gma));
Zhi Wang2707e442016-03-28 23:23:16 +08001754
Changbin Duede9d0c2018-01-30 19:19:40 +08001755 gma_index[0] = gma_ops->gma_to_pde_index(gma);
1756 gma_index[1] = gma_ops->gma_to_pte_index(gma);
1757 levels = 2;
1758 break;
1759 default:
1760 GEM_BUG_ON(1);
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001761 }
Changbin Duede9d0c2018-01-30 19:19:40 +08001762
1763 /* walk the shadow page table and get gpa from guest entry */
1764 for (i = 0; i < levels; i++) {
1765 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
1766 (i == levels - 1));
1767 if (ret)
1768 goto err;
1769
1770 if (!pte_ops->test_present(&e)) {
1771 gvt_dbg_core("GMA 0x%lx is not present\n", gma);
1772 goto err;
1773 }
1774 }
1775
1776 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
1777 (gma & ~I915_GTT_PAGE_MASK);
1778 trace_gma_translate(vgpu->id, "ppgtt", 0,
1779 mm->ppgtt_mm.root_entry_type, gma, gpa);
Zhi Wang2707e442016-03-28 23:23:16 +08001780 }
1781
Zhi Wang2707e442016-03-28 23:23:16 +08001782 return gpa;
1783err:
Tina Zhang695fbc02017-03-10 04:26:53 -05001784 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
Zhi Wang2707e442016-03-28 23:23:16 +08001785 return INTEL_GVT_INVALID_ADDR;
1786}
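/*
 * Usage sketch (hypothetical caller; "workload" and "gma" are
 * illustrative names, not defined in this file):
 *
 *	unsigned long gpa = intel_vgpu_gma_to_gpa(workload->shadow_mm, gma);
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EFAULT;
 */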
1787
Changbin Dua143cef2018-01-30 19:19:45 +08001788static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
Zhi Wang2707e442016-03-28 23:23:16 +08001789 unsigned int off, void *p_data, unsigned int bytes)
1790{
1791 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1792 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1793 unsigned long index = off >> info->gtt_entry_size_shift;
1794 struct intel_gvt_gtt_entry e;
1795
1796 if (bytes != 4 && bytes != 8)
1797 return -EINVAL;
1798
1799 ggtt_get_guest_entry(ggtt_mm, &e, index);
1800 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
1801 bytes);
1802 return 0;
1803}
1804
1805/**
1806 * intel_vgpu_emulate_ggtt_mmio_read - emulate GGTT MMIO register read
1807 * @vgpu: a vGPU
1808 * @off: register offset
1809 * @p_data: data will be returned to guest
1810 * @bytes: data length
1811 *
1812 * This function is used to emulate the GGTT MMIO register read
1813 *
1814 * Returns:
1815 * Zero on success, error code if failed.
1816 */
Changbin Dua143cef2018-01-30 19:19:45 +08001817int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
Zhi Wang2707e442016-03-28 23:23:16 +08001818 void *p_data, unsigned int bytes)
1819{
1820 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1821 int ret;
1822
1823 if (bytes != 4 && bytes != 8)
1824 return -EINVAL;
1825
1826 off -= info->gtt_start_offset;
Changbin Dua143cef2018-01-30 19:19:45 +08001827 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
Zhi Wang2707e442016-03-28 23:23:16 +08001828 return ret;
1829}
1830
Changbin Dua143cef2018-01-30 19:19:45 +08001831static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
Zhi Wang2707e442016-03-28 23:23:16 +08001832 void *p_data, unsigned int bytes)
1833{
1834 struct intel_gvt *gvt = vgpu->gvt;
1835 const struct intel_gvt_device_info *info = &gvt->device_info;
1836 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1837 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1838 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
Changbin Du72f03d72018-01-30 19:19:48 +08001839 unsigned long gma, gfn, mfn;
Zhi Wang2707e442016-03-28 23:23:16 +08001840 struct intel_gvt_gtt_entry e, m;
Zhi Wang2707e442016-03-28 23:23:16 +08001841
1842 if (bytes != 4 && bytes != 8)
1843 return -EINVAL;
1844
Zhi Wang9556e112017-10-10 13:51:32 +08001845 gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
Zhi Wang2707e442016-03-28 23:23:16 +08001846
1847 /* the VM may configure the whole GM space when ballooning is used */
Zhao, Xinda7c281352017-02-21 15:54:56 +08001848 if (!vgpu_gmadr_is_valid(vgpu, gma))
Zhi Wang2707e442016-03-28 23:23:16 +08001849 return 0;
Zhi Wang2707e442016-03-28 23:23:16 +08001850
1851 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
1852
1853 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
1854 bytes);
Changbin Du72f03d72018-01-30 19:19:48 +08001855 m = e;
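	/* The shadow entry starts as a copy of the guest entry; only its
	 * pfn is remapped below from the guest gfn to a host mfn.
	 */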
Zhi Wang2707e442016-03-28 23:23:16 +08001856
1857 if (ops->test_present(&e)) {
Hang Yuancc753fb2017-12-22 18:06:31 +08001858 gfn = ops->get_pfn(&e);
1859
1860 /* A single PTE update may be issued as multiple partial writes,
1861 * and the first write alone may not yet form a valid gfn.
1862 */
1863 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1864 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
1865 goto out;
1866 }
1867
Changbin Du72f03d72018-01-30 19:19:48 +08001868 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
1869 if (mfn == INTEL_GVT_INVALID_ADDR) {
1870 gvt_vgpu_err("fail to populate guest ggtt entry\n");
Xiaoguang Chen359b6932017-03-21 10:54:21 +08001871 /* The guest driver may read/write the entry while it is only
1872 * partially updated; in that window the p2m translation can fail,
1873 * so point the shadow entry at a scratch page instead.
1874 */
Zhi Wang22115ce2017-10-10 14:34:11 +08001875 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
Changbin Du72f03d72018-01-30 19:19:48 +08001876 } else
1877 ops->set_pfn(&m, mfn);
1878 } else
Zhi Wang22115ce2017-10-10 14:34:11 +08001879 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
Zhi Wang2707e442016-03-28 23:23:16 +08001880
Hang Yuancc753fb2017-12-22 18:06:31 +08001881out:
Changbin Du3aff3512018-01-30 19:19:42 +08001882 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
Changbin Dua143cef2018-01-30 19:19:45 +08001883 ggtt_invalidate(gvt->dev_priv);
Zhi Wang2707e442016-03-28 23:23:16 +08001884 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
1885 return 0;
1886}
1887
1888/*
Changbin Dua143cef2018-01-30 19:19:45 +08001889 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
Zhi Wang2707e442016-03-28 23:23:16 +08001890 * @vgpu: a vGPU
1891 * @off: register offset
1892 * @p_data: data from guest write
1893 * @bytes: data length
1894 *
1895 * This function is used to emulate the GGTT MMIO register write
1896 *
1897 * Returns:
1898 * Zero on success, error code if failed.
1899 */
Changbin Dua143cef2018-01-30 19:19:45 +08001900int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
1901 unsigned int off, void *p_data, unsigned int bytes)
Zhi Wang2707e442016-03-28 23:23:16 +08001902{
1903 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1904 int ret;
1905
1906 if (bytes != 4 && bytes != 8)
1907 return -EINVAL;
1908
1909 off -= info->gtt_start_offset;
Changbin Dua143cef2018-01-30 19:19:45 +08001910 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
Zhi Wang2707e442016-03-28 23:23:16 +08001911 return ret;
1912}
1913
Zhenyu Wang4fafba22017-12-18 11:58:46 +08001914int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
1915 void *p_data, unsigned int bytes)
1916{
1917 struct intel_gvt *gvt = vgpu->gvt;
1918 int ret = 0;
1919
1920 if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
1921 struct intel_vgpu_page_track *t;
1922
1923 mutex_lock(&gvt->lock);
1924
1925 t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
1926 if (t) {
1927 if (unlikely(vgpu->failsafe)) {
1928 /* remove write protection to prevent future traps */
Changbin Du44b46732018-01-30 19:19:49 +08001929 intel_gvt_hypervisor_disable_page_track(vgpu, t);
Zhenyu Wang4fafba22017-12-18 11:58:46 +08001930 } else {
1931 ret = t->handler(t, pa, p_data, bytes);
1932 if (ret) {
1933 gvt_err("guest page write error %d, "
1934 "gfn 0x%lx, pa 0x%llx, "
1935 "var 0x%x, len %d\n",
1936 ret, t->gfn, pa,
1937 *(u32 *)p_data, bytes);
1938 }
1939 }
1940 }
1941 mutex_unlock(&gvt->lock);
1942 }
1943 return ret;
1944}
1945
1946
Ping Gao3b6411c2016-11-04 13:47:35 +08001947static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1948 intel_gvt_gtt_type_t type)
Zhi Wang2707e442016-03-28 23:23:16 +08001949{
1950 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
Ping Gao3b6411c2016-11-04 13:47:35 +08001951 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
Zhenyu Wang5c352582017-11-02 17:44:52 +08001952 int page_entry_num = I915_GTT_PAGE_SIZE >>
Ping Gao3b6411c2016-11-04 13:47:35 +08001953 vgpu->gvt->device_info.gtt_entry_size_shift;
Jike Song96317392017-01-09 15:38:38 +08001954 void *scratch_pt;
Ping Gao3b6411c2016-11-04 13:47:35 +08001955 int i;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08001956 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
1957 dma_addr_t daddr;
Zhi Wang2707e442016-03-28 23:23:16 +08001958
Ping Gao3b6411c2016-11-04 13:47:35 +08001959 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
1960 return -EINVAL;
1961
Jike Song96317392017-01-09 15:38:38 +08001962 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
Ping Gao3b6411c2016-11-04 13:47:35 +08001963 if (!scratch_pt) {
Tina Zhang695fbc02017-03-10 04:26:53 -05001964 gvt_vgpu_err("fail to allocate scratch page\n");
Zhi Wang2707e442016-03-28 23:23:16 +08001965 return -ENOMEM;
1966 }
1967
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08001968 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
1969 4096, PCI_DMA_BIDIRECTIONAL);
1970 if (dma_mapping_error(dev, daddr)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05001971 gvt_vgpu_err("fail to dmamap scratch_pt\n");
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08001972 __free_page(virt_to_page(scratch_pt));
1973 return -ENOMEM;
Ping Gao3b6411c2016-11-04 13:47:35 +08001974 }
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08001975 gtt->scratch_pt[type].page_mfn =
Zhenyu Wang5c352582017-11-02 17:44:52 +08001976 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
Jike Song96317392017-01-09 15:38:38 +08001977 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
Ping Gao3b6411c2016-11-04 13:47:35 +08001978 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08001979 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
Ping Gao3b6411c2016-11-04 13:47:35 +08001980
1981 /* Build the tree by filling the scratch pt with entries that point
1982 * to the next-level scratch pt or scratch page. The
1983 * scratch_pt[type] indicates the scratch pt/scratch page used by the
1984 * 'type' pt.
1985 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
Jike Song96317392017-01-09 15:38:38 +08001986 * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
Ping Gao3b6411c2016-11-04 13:47:35 +08001987 * is of type GTT_TYPE_PPGTT_PTE_PT, filled with the scratch page mfn.
1988 */
1989 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
1990 struct intel_gvt_gtt_entry se;
1991
1992 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
1993 se.type = get_entry_type(type - 1);
1994 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
1995
1996 /* The entry parameters like present/writeable/cache type
1997 * are set the same as in i915's scratch page tree.
1998 */
1999 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
2000 if (type == GTT_TYPE_PPGTT_PDE_PT)
Zhi Wangc095b972017-09-14 20:39:41 +08002001 se.val64 |= PPAT_CACHED;
Ping Gao3b6411c2016-11-04 13:47:35 +08002002
2003 for (i = 0; i < page_entry_num; i++)
Jike Song96317392017-01-09 15:38:38 +08002004 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002005 }
2006
Zhi Wang2707e442016-03-28 23:23:16 +08002007 return 0;
2008}
2009
Ping Gao3b6411c2016-11-04 13:47:35 +08002010static int release_scratch_page_tree(struct intel_vgpu *vgpu)
Zhi Wang2707e442016-03-28 23:23:16 +08002011{
Ping Gao3b6411c2016-11-04 13:47:35 +08002012 int i;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002013 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2014 dma_addr_t daddr;
Ping Gao3b6411c2016-11-04 13:47:35 +08002015
2016 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2017 if (vgpu->gtt.scratch_pt[i].page != NULL) {
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002018 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
Zhenyu Wang5c352582017-11-02 17:44:52 +08002019 I915_GTT_PAGE_SHIFT);
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002020 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
Ping Gao3b6411c2016-11-04 13:47:35 +08002021 __free_page(vgpu->gtt.scratch_pt[i].page);
2022 vgpu->gtt.scratch_pt[i].page = NULL;
2023 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2024 }
Zhi Wang2707e442016-03-28 23:23:16 +08002025 }
Ping Gao3b6411c2016-11-04 13:47:35 +08002026
2027 return 0;
2028}
2029
2030static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2031{
2032 int i, ret;
2033
2034 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2035 ret = alloc_scratch_pages(vgpu, i);
2036 if (ret)
2037 goto err;
2038 }
2039
2040 return 0;
2041
2042err:
2043 release_scratch_page_tree(vgpu);
2044 return ret;
Zhi Wang2707e442016-03-28 23:23:16 +08002045}
2046
2047/**
2048 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2049 * @vgpu: a vGPU
2050 *
2051 * This function is used to initialize per-vGPU graphics memory virtualization
2052 * components.
2053 *
2054 * Returns:
2055 * Zero on success, error code if failed.
2056 */
2057int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2058{
2059 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
Zhi Wang2707e442016-03-28 23:23:16 +08002060
Zhi Wang7d1e5cd2017-09-29 02:47:55 +08002061 hash_init(gtt->tracked_guest_page_hash_table);
Zhi Wang2707e442016-03-28 23:23:16 +08002062 hash_init(gtt->shadow_page_hash_table);
2063
Changbin Duede9d0c2018-01-30 19:19:40 +08002064 INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
Zhi Wang2707e442016-03-28 23:23:16 +08002065 INIT_LIST_HEAD(&gtt->oos_page_list_head);
2066 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2067
Changbin Duede9d0c2018-01-30 19:19:40 +08002068 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2069 if (IS_ERR(gtt->ggtt_mm)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002070 gvt_vgpu_err("fail to create mm for ggtt.\n");
Changbin Duede9d0c2018-01-30 19:19:40 +08002071 return PTR_ERR(gtt->ggtt_mm);
Zhi Wang2707e442016-03-28 23:23:16 +08002072 }
2073
Changbin Duede9d0c2018-01-30 19:19:40 +08002074 intel_vgpu_reset_ggtt(vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002075
Ping Gao3b6411c2016-11-04 13:47:35 +08002076 return create_scratch_page_tree(vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002077}
2078
Changbin Duede9d0c2018-01-30 19:19:40 +08002079static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002080{
2081 struct list_head *pos, *n;
2082 struct intel_vgpu_mm *mm;
2083
Changbin Duede9d0c2018-01-30 19:19:40 +08002084 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2085 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
Changbin Du1bc25852018-01-30 19:19:41 +08002086 intel_vgpu_destroy_mm(mm);
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002087 }
Changbin Duede9d0c2018-01-30 19:19:40 +08002088
2089 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2090 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2091
2092 if (GEM_WARN_ON(!hlist_empty(vgpu->gtt.shadow_page_hash_table))) {
2093 gvt_err("vgpu still has spts not freed, freeing them now\n");
2094 ppgtt_free_all_shadow_page(vgpu);
2095 }
2096}
2097
2098static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2099{
Changbin Du1bc25852018-01-30 19:19:41 +08002100 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
Changbin Duede9d0c2018-01-30 19:19:40 +08002101 vgpu->gtt.ggtt_mm = NULL;
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002102}
2103
Zhi Wang2707e442016-03-28 23:23:16 +08002104/**
2105 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2106 * @vgpu: a vGPU
2107 *
2108 * This function is used to clean up per-vGPU graphics memory virtualization
2109 * components.
2113 */
2114void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2115{
Changbin Duede9d0c2018-01-30 19:19:40 +08002116 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2117 intel_vgpu_destroy_ggtt_mm(vgpu);
Ping Gao3b6411c2016-11-04 13:47:35 +08002118 release_scratch_page_tree(vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002119}
2120
2121static void clean_spt_oos(struct intel_gvt *gvt)
2122{
2123 struct intel_gvt_gtt *gtt = &gvt->gtt;
2124 struct list_head *pos, *n;
2125 struct intel_vgpu_oos_page *oos_page;
2126
2127 WARN(!list_empty(&gtt->oos_page_use_list_head),
2128 "someone is still using oos page\n");
2129
2130 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2131 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2132 list_del(&oos_page->list);
2133 kfree(oos_page);
2134 }
2135}
2136
2137static int setup_spt_oos(struct intel_gvt *gvt)
2138{
2139 struct intel_gvt_gtt *gtt = &gvt->gtt;
2140 struct intel_vgpu_oos_page *oos_page;
2141 int i;
2142 int ret;
2143
2144 INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2145 INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2146
2147 for (i = 0; i < preallocated_oos_pages; i++) {
2148 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2149 if (!oos_page) {
Zhi Wang2707e442016-03-28 23:23:16 +08002150 ret = -ENOMEM;
2151 goto fail;
2152 }
2153
2154 INIT_LIST_HEAD(&oos_page->list);
2155 INIT_LIST_HEAD(&oos_page->vm_list);
2156 oos_page->id = i;
2157 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2158 }
2159
2160 gvt_dbg_mm("%d oos pages preallocated\n", i);
2161
2162 return 0;
2163fail:
2164 clean_spt_oos(gvt);
2165 return ret;
2166}
2167
2168/**
2169 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2170 * @vgpu: a vGPU
2171 * @pdps: guest pdps
2173 *
2174 * This function is used to find a PPGTT mm object from mm object pool
2175 *
2176 * Returns:
2177 * pointer to mm object on success, NULL if failed.
2178 */
2179struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
Changbin Duede9d0c2018-01-30 19:19:40 +08002180 u64 pdps[])
Zhi Wang2707e442016-03-28 23:23:16 +08002181{
Zhi Wang2707e442016-03-28 23:23:16 +08002182 struct intel_vgpu_mm *mm;
Changbin Duede9d0c2018-01-30 19:19:40 +08002183 struct list_head *pos;
Zhi Wang2707e442016-03-28 23:23:16 +08002184
Changbin Duede9d0c2018-01-30 19:19:40 +08002185 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2186 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
Zhi Wang2707e442016-03-28 23:23:16 +08002187
Changbin Duede9d0c2018-01-30 19:19:40 +08002188 switch (mm->ppgtt_mm.root_entry_type) {
2189 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2190 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
Zhi Wang2707e442016-03-28 23:23:16 +08002191 return mm;
Changbin Duede9d0c2018-01-30 19:19:40 +08002192 break;
2193 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2194 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2195 sizeof(mm->ppgtt_mm.guest_pdps)))
Zhi Wang2707e442016-03-28 23:23:16 +08002196 return mm;
Changbin Duede9d0c2018-01-30 19:19:40 +08002197 break;
2198 default:
2199 GEM_BUG_ON(1);
Zhi Wang2707e442016-03-28 23:23:16 +08002200 }
2201 }
2202 return NULL;
2203}
2204
2205/**
Changbin Due6e9c462018-01-30 19:19:46 +08002206 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
Zhi Wang2707e442016-03-28 23:23:16 +08002207 * @vgpu: a vGPU
Changbin Duede9d0c2018-01-30 19:19:40 +08002208 * @root_entry_type: ppgtt root entry type
2209 * @pdps: guest pdps
Zhi Wang2707e442016-03-28 23:23:16 +08002210 *
Changbin Due6e9c462018-01-30 19:19:46 +08002211 * This function is used to find or create a PPGTT mm object from a guest.
Zhi Wang2707e442016-03-28 23:23:16 +08002212 *
2213 * Returns:
2214 * A PPGTT mm object on success, an ERR_PTR-encoded error code if failed.
2215 */
Changbin Due6e9c462018-01-30 19:19:46 +08002216struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
Changbin Duede9d0c2018-01-30 19:19:40 +08002217 intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
Zhi Wang2707e442016-03-28 23:23:16 +08002218{
Zhi Wang2707e442016-03-28 23:23:16 +08002219 struct intel_vgpu_mm *mm;
2220
Changbin Duede9d0c2018-01-30 19:19:40 +08002221 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
Zhi Wang2707e442016-03-28 23:23:16 +08002222 if (mm) {
Changbin Du1bc25852018-01-30 19:19:41 +08002223 intel_vgpu_mm_get(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08002224 } else {
Changbin Duede9d0c2018-01-30 19:19:40 +08002225 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
Changbin Due6e9c462018-01-30 19:19:46 +08002226 if (IS_ERR(mm))
Tina Zhang695fbc02017-03-10 04:26:53 -05002227 gvt_vgpu_err("fail to create mm\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002228 }
Changbin Due6e9c462018-01-30 19:19:46 +08002229 return mm;
Zhi Wang2707e442016-03-28 23:23:16 +08002230}
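/*
 * Usage sketch (hypothetical caller, e.g. a PPGTT control command
 * handler; the pdps values are illustrative):
 *
 *	u64 pdps[4] = { pml4_gpa };
 *	struct intel_vgpu_mm *mm;
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 */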
2231
2232/**
Changbin Due6e9c462018-01-30 19:19:46 +08002233 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
Zhi Wang2707e442016-03-28 23:23:16 +08002234 * @vgpu: a vGPU
Changbin Duede9d0c2018-01-30 19:19:40 +08002235 * @pdps: guest pdps
Zhi Wang2707e442016-03-28 23:23:16 +08002236 *
Changbin Due6e9c462018-01-30 19:19:46 +08002237 * This function is used to find a PPGTT mm object from a guest and put its reference.
Zhi Wang2707e442016-03-28 23:23:16 +08002238 *
2239 * Returns:
2240 * Zero on success, negative error code if failed.
2241 */
Changbin Due6e9c462018-01-30 19:19:46 +08002242int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
Zhi Wang2707e442016-03-28 23:23:16 +08002243{
Zhi Wang2707e442016-03-28 23:23:16 +08002244 struct intel_vgpu_mm *mm;
2245
Changbin Duede9d0c2018-01-30 19:19:40 +08002246 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
Zhi Wang2707e442016-03-28 23:23:16 +08002247 if (!mm) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002248 gvt_vgpu_err("fail to find ppgtt instance.\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002249 return -EINVAL;
2250 }
Changbin Du1bc25852018-01-30 19:19:41 +08002251 intel_vgpu_mm_put(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08002252 return 0;
2253}
2254
2255/**
2256 * intel_gvt_init_gtt - initialize mm components of a GVT device
2257 * @gvt: GVT device
2258 *
2259 * This function is called at the initialization stage, to initialize
2260 * the mm components of a GVT device.
2261 *
2262 * Returns:
2263 * zero on success, negative error code if failed.
2264 */
2265int intel_gvt_init_gtt(struct intel_gvt *gvt)
2266{
2267 int ret;
Jike Song96317392017-01-09 15:38:38 +08002268 void *page;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002269 struct device *dev = &gvt->dev_priv->drm.pdev->dev;
2270 dma_addr_t daddr;
Zhi Wang2707e442016-03-28 23:23:16 +08002271
2272 gvt_dbg_core("init gtt\n");
2273
Xu Hane3476c02017-03-29 10:13:59 +08002274 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
2275 || IS_KABYLAKE(gvt->dev_priv)) {
Zhi Wang2707e442016-03-28 23:23:16 +08002276 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2277 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
Zhi Wang2707e442016-03-28 23:23:16 +08002278 } else {
2279 return -ENODEV;
2280 }
2281
Jike Song96317392017-01-09 15:38:38 +08002282 page = (void *)get_zeroed_page(GFP_KERNEL);
2283 if (!page) {
Ping Gaod650ac02016-12-08 10:14:48 +08002284 gvt_err("fail to allocate scratch ggtt page\n");
2285 return -ENOMEM;
2286 }
2287
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002288 daddr = dma_map_page(dev, virt_to_page(page), 0,
2289 4096, PCI_DMA_BIDIRECTIONAL);
2290 if (dma_mapping_error(dev, daddr)) {
2291 gvt_err("fail to dmamap scratch ggtt page\n");
2292 __free_page(virt_to_page(page));
2293 return -ENOMEM;
Ping Gaod650ac02016-12-08 10:14:48 +08002294 }
Zhi Wang22115ce2017-10-10 14:34:11 +08002295
2296 gvt->gtt.scratch_page = virt_to_page(page);
2297 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
Ping Gaod650ac02016-12-08 10:14:48 +08002298
Zhi Wang2707e442016-03-28 23:23:16 +08002299 if (enable_out_of_sync) {
2300 ret = setup_spt_oos(gvt);
2301 if (ret) {
2302 gvt_err("fail to initialize SPT oos\n");
Zhou, Wenjia0de98702017-07-04 15:47:00 +08002303 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
Zhi Wang22115ce2017-10-10 14:34:11 +08002304 __free_page(gvt->gtt.scratch_page);
Zhi Wang2707e442016-03-28 23:23:16 +08002305 return ret;
2306 }
2307 }
Changbin Duede9d0c2018-01-30 19:19:40 +08002308 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
Zhi Wang2707e442016-03-28 23:23:16 +08002309 return 0;
2310}
2311
2312/**
2313 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2314 * @gvt: GVT device
2315 *
2316 * This function is called at the driver unloading stage, to clean up the
2317 * mm components of a GVT device.
2318 *
2319 */
2320void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2321{
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002322 struct device *dev = &gvt->dev_priv->drm.pdev->dev;
Zhi Wang22115ce2017-10-10 14:34:11 +08002323 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
Zhi Wang9556e112017-10-10 13:51:32 +08002324 I915_GTT_PAGE_SHIFT);
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002325
2326 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2327
Zhi Wang22115ce2017-10-10 14:34:11 +08002328 __free_page(gvt->gtt.scratch_page);
Ping Gaod650ac02016-12-08 10:14:48 +08002329
Zhi Wang2707e442016-03-28 23:23:16 +08002330 if (enable_out_of_sync)
2331 clean_spt_oos(gvt);
2332}
Ping Gaod650ac02016-12-08 10:14:48 +08002333
2334/**
2335 * intel_vgpu_reset_ggtt - reset the GGTT entry
2336 * @vgpu: a vGPU
2337 *
2338 * This function is called at the vGPU create stage
2339 * to reset all the GGTT entries.
2340 *
2341 */
2342void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2343{
2344 struct intel_gvt *gvt = vgpu->gvt;
Zhenyu Wang5ad59bf2017-04-12 16:24:57 +08002345 struct drm_i915_private *dev_priv = gvt->dev_priv;
Changbin Dub0c766b2018-01-30 19:19:43 +08002346 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2347 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
Ping Gaod650ac02016-12-08 10:14:48 +08002348 u32 index;
Ping Gaod650ac02016-12-08 10:14:48 +08002349 u32 num_entries;
Ping Gaod650ac02016-12-08 10:14:48 +08002350
Changbin Dub0c766b2018-01-30 19:19:43 +08002351 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2352 pte_ops->set_present(&entry);
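	/* Point every GGTT entry in both the mappable aperture and the
	 * hidden range at the scratch page.
	 */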
Ping Gaod650ac02016-12-08 10:14:48 +08002353
2354 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2355 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
Changbin Dub0c766b2018-01-30 19:19:43 +08002356 while (num_entries--)
2357 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
Ping Gaod650ac02016-12-08 10:14:48 +08002358
2359 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2360 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
Changbin Dub0c766b2018-01-30 19:19:43 +08002361 while (num_entries--)
2362 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
Zhenyu Wang5ad59bf2017-04-12 16:24:57 +08002363
Changbin Dua143cef2018-01-30 19:19:45 +08002364 ggtt_invalidate(dev_priv);
Ping Gaod650ac02016-12-08 10:14:48 +08002365}
Changbin Dub6115812017-01-13 11:15:57 +08002366
2367/**
2368 * intel_vgpu_reset_gtt - reset the all GTT related status
2369 * @vgpu: a vGPU
Changbin Dub6115812017-01-13 11:15:57 +08002370 *
2371 * This function is called from the vfio core to reset all
2372 * GTT related status, including GGTT, PPGTT, scratch page.
2373 *
2374 */
Chuanxiao Dong4d3e67b2017-08-04 13:08:59 +08002375void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
Changbin Dub6115812017-01-13 11:15:57 +08002376{
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002377 /* Shadow pages are only created when there is no page
2378 * table tracking data, so remove page tracking data after
2379 * removing the shadow pages.
2380 */
Changbin Duede9d0c2018-01-30 19:19:40 +08002381 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
Changbin Dub6115812017-01-13 11:15:57 +08002382 intel_vgpu_reset_ggtt(vgpu);
Changbin Dub6115812017-01-13 11:15:57 +08002383}