/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

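/* Helpers that classify a GTT_TYPE_* value: page-table entry, page table,
 * PTE-level page table, or root pointer.
 */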
#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of the page table this kind of entry lives in
 * - the type of the entry when its PSE bit is set
 *
 * When the requested information does not exist for a type,
 * GTT_TYPE_INVALID is returned: e.g. an L4 root entry has no PSE bit,
 * and a PTE page table has no next-level page table type. This is
 * useful when traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

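/* Flush pending GGTT writes to the GPU; the flush-control register write is
 * wrapped by mmio_hw_access_pre/post so it happens under the required
 * hardware power handling.
 */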
static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

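/* Read/write one 64-bit GTT entry. Depending on the arguments the entry lives
 * in guest memory (hypervisor_access, addressed by gpa + index), in the
 * hardware GGTT (pt == NULL), or in a host-side page-table page (pt).
 */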
static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

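/* GTT_HAW is the address width assumed for shadow entries; the masks below
 * select the address bits of a 1G, 2M or 4K page entry respectively.
 */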
#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & _PAGE_PSE))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without present bit,
	 * it also works, so we need to treat root pointer entry
	 * specifically.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

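/* Index extraction from a gen8 graphics memory address: bits 12-20 give the
 * PTE index, 21-29 the PDE, 30-38 the PDP entry (only 30-31 when the four
 * PDPs are themselves the root of a 3-level table) and 39-47 the PML4 entry.
 */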
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);

	pte_ops->test_pse(entry);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

/**
 * intel_vgpu_init_page_track - init a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 * @gfn: guest memory page frame number
 * @handler: the function will be called when target guest memory page has
 * been modified.
 *
 * This function is called when a user wants to prepare a page track data
 * structure to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	INIT_HLIST_NODE(&t->node);

	t->tracked = false;
	t->gfn = gfn;
	t->handler = handler;
	t->data = data;

	hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
	return 0;
}

/**
 * intel_vgpu_clean_page_track - release a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 *
 * This function is called before a user frees a page track data structure.
 */
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t)
{
	if (!hlist_unhashed(&t->node))
		hash_del(&t->node);

	if (t->tracked)
		intel_gvt_hypervisor_disable_page_track(vgpu, t);
}

/**
 * intel_vgpu_find_tracked_page - find a tracked guest page
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation layer wants to figure out if a
 * trapped GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to page track data structure, NULL if not found.
 */
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *t;

	hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
			t, node, gfn) {
		if (t->gfn == gfn)
			return t;
	}
	return NULL;
}

static int init_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	p->oos_page = NULL;
	p->write_cnt = 0;

	return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void clean_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p)
{
	if (p->oos_page)
		detach_oos_page(vgpu, p->oos_page);

	intel_vgpu_clean_page_track(vgpu, &p->track);
}

static inline int init_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p, int type, bool hash)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		return -EINVAL;
	}

	p->vaddr = page_address(p->page);
	p->type = type;

	INIT_HLIST_NODE(&p->node);

	p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
	if (hash)
		hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
	return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
			PCI_DMA_BIDIRECTIONAL);

	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p;

	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
		p, node, mfn) {
		if (p->mfn == mfn)
			return p;
	}
	return NULL;
}

#define page_track_to_guest_page(ptr) \
	container_of(ptr, struct intel_vgpu_guest_page, track)

#define guest_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

	clean_shadow_page(spt->vgpu, &spt->shadow_page);
	clean_guest_page(spt->vgpu, &spt->guest_page);
	list_del_init(&spt->post_shadow_list);

	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_shadow_page *sp;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_guest_page *gpt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *data, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_page_track *t = data;
	struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!t->tracked)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(p,
		pa, p_data, bytes);
	if (ret)
		return ret;
	return ret;
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	spt->guest_page_type = type;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * TODO: guest page type may be different with shadow page type,
	 *	 when we support PSE page in future.
	 */
	ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
	if (ret) {
		gvt_vgpu_err("fail to initialize shadow page for spt\n");
		goto err;
	}

	ret = init_guest_page(vgpu, &spt->guest_page,
			gfn, ppgtt_write_protection_handler, NULL);
	if (ret) {
		gvt_vgpu_err("fail to initialize guest page for spt\n");
		goto err;
	}

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;
err:
	ppgtt_free_shadow_page(spt);
	return ERR_PTR(ret);
}

static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

	if (p)
		return shadow_page_to_ppgtt_spt(p);

	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
	return NULL;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

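/* Walk all entries of a guest/shadow page table page, running the loop body
 * only for entries whose present bit is set.
 */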
#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_shadow_page(s);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	if (gtt_type_is_pte_pt(spt->shadow_page.type))
		goto release;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 2M/1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
release:
	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.track.gfn, spt->shadow_page.type);
	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s = NULL;
	struct intel_vgpu_guest_page *g;
	struct intel_vgpu_page_track *t;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
	if (t) {
		g = page_track_to_guest_page(t);
		s = guest_page_to_ppgtt_spt(g);
		ppgtt_get_shadow_page(s);
	} else {
		int type = get_next_pt_type(we->type);

		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}

		ret = intel_gvt_hypervisor_enable_page_track(vgpu,
				&s->guest_page.track);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(s);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
				 s->shadow_page.type);
	}
	return s;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			s, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn, mfn;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	};

	/* direct shadow */
	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR)
		return -ENXIO;

	pte_ops->set_pfn(&se, mfn);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.track.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_shadow_page_by_guest_entry(vgpu,
								      &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
			 index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
			 we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}

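/*
 * Out-of-sync (OOS) page handling: a write-heavy guest PTE page table can be
 * taken out of write protection and snapshotted into oos_page->mem. Guest
 * writes are then only counted, and the shadow table is re-synced from the
 * difference between the snapshot and current guest memory before the next
 * workload is submitted (see intel_vgpu_sync_oos_pages()).
 */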
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	old.type = new.type = get_entry_type(spt->guest_page_type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				oos_page->guest_page, spt->guest_page_type,
				new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	oos_page->guest_page->write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	oos_page->guest_page->write_cnt = 0;
	oos_page->guest_page->oos_page = NULL;
	oos_page->guest_page = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(vgpu,
			gpt->track.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->guest_page = gpt;
	gpt->oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	int ret;

	ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
	if (ret)
		return ret;

	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_del_init(&gpt->oos_page->vm_list);
	return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page has already has a oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
		ret = detach_oos_page(vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n"))
		return -EINVAL;

	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow for vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-synced shadow for vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int ret;

	new_present = ops->test_present(we);

	/*
	 * Adding the new entry first and then removing the old one, that can
	 * guarantee the ppgtt table is validated during the window between
	 * adding and removal.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(gpt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &old_se, index);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(
			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
		&& gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}

1445/**
1446 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1447 * @vgpu: a vGPU
1448 *
1449 * This function is called before submitting a guest workload to host,
1450 * to flush all the post shadows for a vGPU.
1451 *
1452 * Returns:
1453 * Zero on success, negative error code if failed.
1454 */
1455int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1456{
1457 struct list_head *pos, *n;
1458 struct intel_vgpu_ppgtt_spt *spt;
Bing Niu9baf0922016-11-07 10:44:36 +08001459 struct intel_gvt_gtt_entry ge;
Zhi Wang2707e442016-03-28 23:23:16 +08001460 unsigned long index;
1461 int ret;
1462
1463 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1464 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1465 post_shadow_list);
1466
1467 for_each_set_bit(index, spt->post_shadow_bitmap,
1468 GTT_ENTRY_NUM_IN_ONE_PAGE) {
1469 ppgtt_get_guest_entry(spt, &ge, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001470
1471 ret = ppgtt_handle_guest_write_page_table(
1472 &spt->guest_page, &ge, index);
1473 if (ret)
1474 return ret;
1475 clear_bit(index, spt->post_shadow_bitmap);
1476 }
1477 list_del_init(&spt->post_shadow_list);
1478 }
1479 return 0;
1480}
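
/*
 * Illustrative sketch, not part of this file: the workload submission path
 * is expected to replay pending post-shadow updates before a guest workload
 * runs.  The caller below is hypothetical; only the call to
 * intel_vgpu_flush_post_shadow() is taken from this file.
 *
 *	static int prepare_workload_sketch(struct intel_vgpu *vgpu)
 *	{
 *		int ret = intel_vgpu_flush_post_shadow(vgpu);
 *
 *		if (ret)
 *			return ret;
 *		// ...pin the mm and submit the workload...
 *		return 0;
 *	}
 */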
1481
Zhi Wang7d1e5cd2017-09-29 02:47:55 +08001482static int ppgtt_handle_guest_write_page_table_bytes(
1483 struct intel_vgpu_guest_page *gpt,
Zhi Wang2707e442016-03-28 23:23:16 +08001484 u64 pa, void *p_data, int bytes)
1485{
Zhi Wang2707e442016-03-28 23:23:16 +08001486 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
1487 struct intel_vgpu *vgpu = spt->vgpu;
1488 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1489 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
Tina Zhang6b3816d2017-08-14 15:24:14 +08001490 struct intel_gvt_gtt_entry we, se;
Zhi Wang2707e442016-03-28 23:23:16 +08001491 unsigned long index;
1492 int ret;
1493
1494 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1495
1496 ppgtt_get_guest_entry(spt, &we, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001497
1498 ops->test_pse(&we);
1499
1500 if (bytes == info->gtt_entry_size) {
1501 ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
1502 if (ret)
1503 return ret;
1504 } else {
Zhi Wang2707e442016-03-28 23:23:16 +08001505 if (!test_bit(index, spt->post_shadow_bitmap)) {
Zhi Wang121d760d2017-12-29 02:50:08 +08001506 int type = spt->shadow_page.type;
1507
Tina Zhang6b3816d2017-08-14 15:24:14 +08001508 ppgtt_get_shadow_entry(spt, &se, index);
1509 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001510 if (ret)
1511 return ret;
Zhi Wang121d760d2017-12-29 02:50:08 +08001512 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1513 ppgtt_set_shadow_entry(spt, &se, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001514 }
Zhi Wang2707e442016-03-28 23:23:16 +08001515 ppgtt_set_post_shadow(spt, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001516 }
1517
1518 if (!enable_out_of_sync)
1519 return 0;
1520
1521 gpt->write_cnt++;
1522
1523 if (gpt->oos_page)
1524 ops->set_entry(gpt->oos_page->mem, &we, index,
1525 false, 0, vgpu);
1526
1527 if (can_do_out_of_sync(gpt)) {
1528 if (!gpt->oos_page)
1529 ppgtt_allocate_oos_page(vgpu, gpt);
1530
1531 ret = ppgtt_set_guest_page_oos(vgpu, gpt);
1532 if (ret < 0)
1533 return ret;
1534 }
1535 return 0;
1536}
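
/*
 * Worked example for the partial-write path above (assuming the 8-byte
 * gen8 PTE layout, i.e. gtt_entry_size == 8): a guest that updates one
 * PPGTT entry with two 4-byte writes at page offsets 0x230 and 0x234
 * takes the "bytes != info->gtt_entry_size" branch twice with
 *
 *	index = (0x230 & (PAGE_SIZE - 1)) >> 3 = 0x46
 *
 * The first write points the shadow entry at the scratch page and marks
 * bit 0x46 in post_shadow_bitmap; the complete value is only shadowed
 * later, when intel_vgpu_flush_post_shadow() replays the entry.
 */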
1537
Changbin Duede9d0c2018-01-30 19:19:40 +08001538static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
Zhi Wang2707e442016-03-28 23:23:16 +08001539{
1540 struct intel_vgpu *vgpu = mm->vgpu;
1541 struct intel_gvt *gvt = vgpu->gvt;
1542 struct intel_gvt_gtt *gtt = &gvt->gtt;
1543 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1544 struct intel_gvt_gtt_entry se;
Changbin Duede9d0c2018-01-30 19:19:40 +08001545 int index;
Zhi Wang2707e442016-03-28 23:23:16 +08001546
Changbin Duede9d0c2018-01-30 19:19:40 +08001547 if (!mm->ppgtt_mm.shadowed)
Zhi Wang2707e442016-03-28 23:23:16 +08001548 return;
1549
Changbin Duede9d0c2018-01-30 19:19:40 +08001550 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1551 ppgtt_get_shadow_root_entry(mm, &se, index);
1552
Zhi Wang2707e442016-03-28 23:23:16 +08001553 if (!ops->test_present(&se))
1554 continue;
Changbin Duede9d0c2018-01-30 19:19:40 +08001555
1556 ppgtt_invalidate_shadow_page_by_shadow_entry(vgpu, &se);
Zhi Wang2707e442016-03-28 23:23:16 +08001557 se.val64 = 0;
Changbin Duede9d0c2018-01-30 19:19:40 +08001558 ppgtt_set_shadow_root_entry(mm, &se, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001559
1560 trace_gpt_change(vgpu->id, "destroy root pointer",
Changbin Duede9d0c2018-01-30 19:19:40 +08001561 NULL, se.type, se.val64, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001562 }
Changbin Duede9d0c2018-01-30 19:19:40 +08001563
1564 mm->ppgtt_mm.shadowed = false;
Zhi Wang2707e442016-03-28 23:23:16 +08001565}
1566
Changbin Duede9d0c2018-01-30 19:19:40 +08001568static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
Zhi Wang2707e442016-03-28 23:23:16 +08001569{
1570 struct intel_vgpu *vgpu = mm->vgpu;
1571 struct intel_gvt *gvt = vgpu->gvt;
1572 struct intel_gvt_gtt *gtt = &gvt->gtt;
1573 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1574 struct intel_vgpu_ppgtt_spt *spt;
1575 struct intel_gvt_gtt_entry ge, se;
Changbin Duede9d0c2018-01-30 19:19:40 +08001576 int index, ret;
Zhi Wang2707e442016-03-28 23:23:16 +08001577
Changbin Duede9d0c2018-01-30 19:19:40 +08001578 if (mm->ppgtt_mm.shadowed)
Zhi Wang2707e442016-03-28 23:23:16 +08001579 return 0;
1580
Changbin Duede9d0c2018-01-30 19:19:40 +08001581 mm->ppgtt_mm.shadowed = true;
Zhi Wang2707e442016-03-28 23:23:16 +08001582
Changbin Duede9d0c2018-01-30 19:19:40 +08001583 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1584 ppgtt_get_guest_root_entry(mm, &ge, index);
1585
Zhi Wang2707e442016-03-28 23:23:16 +08001586 if (!ops->test_present(&ge))
1587 continue;
1588
1589 trace_gpt_change(vgpu->id, __func__, NULL,
Changbin Duede9d0c2018-01-30 19:19:40 +08001590 ge.type, ge.val64, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001591
1592 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
1593 if (IS_ERR(spt)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05001594 gvt_vgpu_err("fail to populate guest root pointer\n");
Zhi Wang2707e442016-03-28 23:23:16 +08001595 ret = PTR_ERR(spt);
1596 goto fail;
1597 }
1598 ppgtt_generate_shadow_entry(&se, spt, &ge);
Changbin Duede9d0c2018-01-30 19:19:40 +08001599 ppgtt_set_shadow_root_entry(mm, &se, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001600
1601 trace_gpt_change(vgpu->id, "populate root pointer",
Changbin Duede9d0c2018-01-30 19:19:40 +08001602 NULL, se.type, se.val64, index);
Zhi Wang2707e442016-03-28 23:23:16 +08001603 }
Changbin Duede9d0c2018-01-30 19:19:40 +08001604
Zhi Wang2707e442016-03-28 23:23:16 +08001605 return 0;
1606fail:
Changbin Duede9d0c2018-01-30 19:19:40 +08001607 invalidate_ppgtt_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001608 return ret;
1609}
1610
Changbin Duede9d0c2018-01-30 19:19:40 +08001611static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1612{
1613 struct intel_vgpu_mm *mm;
1614
1615 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1616 if (!mm)
1617 return NULL;
1618
1619 mm->vgpu = vgpu;
1620 kref_init(&mm->ref);
1621 atomic_set(&mm->pincount, 0);
1622
1623 return mm;
1624}
1625
1626static void vgpu_free_mm(struct intel_vgpu_mm *mm)
1627{
1628 kfree(mm);
1629}
1630
Zhi Wang2707e442016-03-28 23:23:16 +08001631/**
Changbin Duede9d0c2018-01-30 19:19:40 +08001632 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
Zhi Wang2707e442016-03-28 23:23:16 +08001633 * @vgpu: a vGPU
Changbin Duede9d0c2018-01-30 19:19:40 +08001634 * @root_entry_type: ppgtt root entry type
1635 * @pdps: guest pdps.
Zhi Wang2707e442016-03-28 23:23:16 +08001636 *
Changbin Duede9d0c2018-01-30 19:19:40 +08001637 * This function is used to create a ppgtt mm object for a vGPU.
Zhi Wang2707e442016-03-28 23:23:16 +08001638 *
1639 * Returns:
1640 * Pointer to the new ppgtt mm object on success, ERR_PTR() on failure.
1641 */
Changbin Duede9d0c2018-01-30 19:19:40 +08001642struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1643 intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
Zhi Wang2707e442016-03-28 23:23:16 +08001644{
1645 struct intel_gvt *gvt = vgpu->gvt;
Zhi Wang2707e442016-03-28 23:23:16 +08001646 struct intel_vgpu_mm *mm;
1647 int ret;
1648
Changbin Duede9d0c2018-01-30 19:19:40 +08001649 mm = vgpu_alloc_mm(vgpu);
1650 if (!mm)
1651 return ERR_PTR(-ENOMEM);
Zhi Wang2707e442016-03-28 23:23:16 +08001652
Changbin Duede9d0c2018-01-30 19:19:40 +08001653 mm->type = INTEL_GVT_MM_PPGTT;
Zhi Wang2707e442016-03-28 23:23:16 +08001654
Changbin Duede9d0c2018-01-30 19:19:40 +08001655 GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1656 root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1657 mm->ppgtt_mm.root_entry_type = root_entry_type;
Zhi Wang2707e442016-03-28 23:23:16 +08001658
Changbin Duede9d0c2018-01-30 19:19:40 +08001659 INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1660 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
Zhi Wang2707e442016-03-28 23:23:16 +08001661
Changbin Duede9d0c2018-01-30 19:19:40 +08001662 if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1663 mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1664 else
1665 memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1666 sizeof(mm->ppgtt_mm.guest_pdps));
Zhi Wang2707e442016-03-28 23:23:16 +08001667
Changbin Duede9d0c2018-01-30 19:19:40 +08001668 ret = shadow_ppgtt_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001669 if (ret) {
Changbin Duede9d0c2018-01-30 19:19:40 +08001670 gvt_vgpu_err("failed to shadow ppgtt mm\n");
1671 vgpu_free_mm(mm);
1672 return ERR_PTR(ret);
Zhi Wang2707e442016-03-28 23:23:16 +08001673 }
1674
Changbin Duede9d0c2018-01-30 19:19:40 +08001675 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1676 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
Zhi Wang2707e442016-03-28 23:23:16 +08001677 return mm;
Changbin Duede9d0c2018-01-30 19:19:40 +08001678}
1679
1680static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1681{
1682 struct intel_vgpu_mm *mm;
1683 unsigned long nr_entries;
1684
1685 mm = vgpu_alloc_mm(vgpu);
1686 if (!mm)
1687 return ERR_PTR(-ENOMEM);
1688
1689 mm->type = INTEL_GVT_MM_GGTT;
1690
1691 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1692 mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
1693 vgpu->gvt->device_info.gtt_entry_size);
1694 if (!mm->ggtt_mm.virtual_ggtt) {
1695 vgpu_free_mm(mm);
1696 return ERR_PTR(-ENOMEM);
1697 }
1698
1699 return mm;
1700}
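
/*
 * Sizing sketch (numbers are illustrative, not taken from this file):
 * with 4 GiB of global graphics memory and 8-byte GTT entries,
 *
 *	nr_entries = (4ULL << 30) >> I915_GTT_PAGE_SHIFT = 1048576
 *
 * so virtual_ggtt above would be a 1048576 * 8 byte = 8 MiB vzalloc'ed
 * copy of the guest's view of the GGTT.
 */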
1701
1702/**
Changbin Du1bc25852018-01-30 19:19:41 +08001703 * _intel_vgpu_mm_release - destroy a mm object
Changbin Duede9d0c2018-01-30 19:19:40 +08001704 * @mm_ref: a kref object
1705 *
1706 * This function is used to destroy a mm object for a vGPU
1707 *
1708 */
Changbin Du1bc25852018-01-30 19:19:41 +08001709void _intel_vgpu_mm_release(struct kref *mm_ref)
Changbin Duede9d0c2018-01-30 19:19:40 +08001710{
1711 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
1712
1713 if (GEM_WARN_ON(atomic_read(&mm->pincount)))
1714 gvt_err("vgpu mm pin count bug detected\n");
1715
1716 if (mm->type == INTEL_GVT_MM_PPGTT) {
1717 list_del(&mm->ppgtt_mm.list);
1718 list_del(&mm->ppgtt_mm.lru_list);
1719 invalidate_ppgtt_mm(mm);
1720 } else {
1721 vfree(mm->ggtt_mm.virtual_ggtt);
1722 }
1723
1724 vgpu_free_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001725}
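
/*
 * Reference-counting sketch (hypothetical call site): callers are expected
 * to hold a reference while they keep a pointer to the mm, using the
 * intel_vgpu_mm_get()/intel_vgpu_mm_put() helpers seen later in this file
 * (presumably thin kref wrappers that end up in _intel_vgpu_mm_release()),
 * rather than freeing an mm object directly:
 *
 *	intel_vgpu_mm_get(mm);
 *	// ...mm may be used after the caller's lock is dropped...
 *	intel_vgpu_mm_put(mm);
 */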
1726
1727/**
1728 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
1729 * @mm: a vGPU mm object
1730 *
1731 * This function is called when a user no longer needs a vGPU mm object
1732 */
1733void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1734{
Zhi Wang2707e442016-03-28 23:23:16 +08001735 atomic_dec(&mm->pincount);
1736}
1737
1738/**
1739 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
1740 * @mm: target vGPU mm object
1741 *
1742 * This function is called when a user wants to use a vGPU mm object. If this
1743 * mm object hasn't been shadowed yet, the shadow will be populated at this
1744 * time.
1745 *
1746 * Returns:
1747 * Zero on success, negative error code if failed.
1748 */
1749int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1750{
1751 int ret;
1752
Changbin Duede9d0c2018-01-30 19:19:40 +08001753 atomic_inc(&mm->pincount);
Zhi Wang2707e442016-03-28 23:23:16 +08001754
Changbin Duede9d0c2018-01-30 19:19:40 +08001755 if (mm->type == INTEL_GVT_MM_PPGTT) {
1756 ret = shadow_ppgtt_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001757 if (ret)
1758 return ret;
Changbin Duede9d0c2018-01-30 19:19:40 +08001759
1760 list_move_tail(&mm->ppgtt_mm.lru_list,
1761 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
1762
Zhi Wang2707e442016-03-28 23:23:16 +08001763 }
1764
Zhi Wang2707e442016-03-28 23:23:16 +08001765 return 0;
1766}
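
/*
 * Illustrative pairing (hypothetical caller): code that walks the shadow
 * page tables pins the mm for the duration of the access, which also keeps
 * it off the reclaim path in reclaim_one_ppgtt_mm() below.
 *
 *	ret = intel_vgpu_pin_mm(mm);
 *	if (ret)
 *		return ret;
 *	gpa = intel_vgpu_gma_to_gpa(mm, gma);
 *	intel_vgpu_unpin_mm(mm);
 */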
1767
Changbin Duede9d0c2018-01-30 19:19:40 +08001768static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
Zhi Wang2707e442016-03-28 23:23:16 +08001769{
1770 struct intel_vgpu_mm *mm;
1771 struct list_head *pos, *n;
1772
Changbin Duede9d0c2018-01-30 19:19:40 +08001773 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
1774 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
Zhi Wang2707e442016-03-28 23:23:16 +08001775
Zhi Wang2707e442016-03-28 23:23:16 +08001776 if (atomic_read(&mm->pincount))
1777 continue;
1778
Changbin Duede9d0c2018-01-30 19:19:40 +08001779 list_del_init(&mm->ppgtt_mm.lru_list);
1780 invalidate_ppgtt_mm(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08001781 return 1;
1782 }
1783 return 0;
1784}
1785
1786/*
1787 * GMA translation APIs.
1788 */
1789static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
1790 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
1791{
1792 struct intel_vgpu *vgpu = mm->vgpu;
1793 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1794 struct intel_vgpu_ppgtt_spt *s;
1795
Zhi Wang2707e442016-03-28 23:23:16 +08001796 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
1797 if (!s)
1798 return -ENXIO;
1799
1800 if (!guest)
1801 ppgtt_get_shadow_entry(s, e, index);
1802 else
1803 ppgtt_get_guest_entry(s, e, index);
1804 return 0;
1805}
1806
1807/**
1808 * intel_vgpu_gma_to_gpa - translate a gma to GPA
1809 * @mm: mm object. could be a PPGTT or GGTT mm object
1810 * @gma: graphics memory address in this mm object
1811 *
1812 * This function is used to translate a graphics memory address in a specific
1813 * graphics memory space to a guest physical address.
1814 *
1815 * Returns:
1816 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
1817 */
1818unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
1819{
1820 struct intel_vgpu *vgpu = mm->vgpu;
1821 struct intel_gvt *gvt = vgpu->gvt;
1822 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
1823 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
1824 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
1825 unsigned long gma_index[4];
1826 struct intel_gvt_gtt_entry e;
Changbin Duede9d0c2018-01-30 19:19:40 +08001827 int i, levels = 0;
Zhi Wang2707e442016-03-28 23:23:16 +08001828 int ret;
1829
Changbin Duede9d0c2018-01-30 19:19:40 +08001830 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
1831 mm->type != INTEL_GVT_MM_PPGTT);
Zhi Wang2707e442016-03-28 23:23:16 +08001832
1833 if (mm->type == INTEL_GVT_MM_GGTT) {
1834 if (!vgpu_gmadr_is_valid(vgpu, gma))
1835 goto err;
1836
Changbin Duede9d0c2018-01-30 19:19:40 +08001837 ggtt_get_guest_entry(mm, &e,
1838 gma_ops->gma_to_ggtt_pte_index(gma));
1839
Zhi Wang9556e112017-10-10 13:51:32 +08001840 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
1841 + (gma & ~I915_GTT_PAGE_MASK);
Zhi Wang2707e442016-03-28 23:23:16 +08001842
1843 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
Changbin Duede9d0c2018-01-30 19:19:40 +08001844 } else {
1845 switch (mm->ppgtt_mm.root_entry_type) {
1846 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
1847 ppgtt_get_shadow_root_entry(mm, &e, 0);
Zhi Wang2707e442016-03-28 23:23:16 +08001848
Changbin Duede9d0c2018-01-30 19:19:40 +08001849 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
1850 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
1851 gma_index[2] = gma_ops->gma_to_pde_index(gma);
1852 gma_index[3] = gma_ops->gma_to_pte_index(gma);
1853 levels = 4;
1854 break;
1855 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
1856 ppgtt_get_shadow_root_entry(mm, &e,
1857 gma_ops->gma_to_l3_pdp_index(gma));
Zhi Wang2707e442016-03-28 23:23:16 +08001858
Changbin Duede9d0c2018-01-30 19:19:40 +08001859 gma_index[0] = gma_ops->gma_to_pde_index(gma);
1860 gma_index[1] = gma_ops->gma_to_pte_index(gma);
1861 levels = 2;
1862 break;
1863 default:
1864 GEM_BUG_ON(1);
Changbin Du4b2dbbc2017-08-02 15:06:37 +08001865 }
Changbin Duede9d0c2018-01-30 19:19:40 +08001866
1867 /* walk the shadow page table and get gpa from guest entry */
1868 for (i = 0; i < levels; i++) {
1869 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
1870 (i == levels - 1));
1871 if (ret)
1872 goto err;
1873
1874 if (!pte_ops->test_present(&e)) {
1875 gvt_dbg_core("GMA 0x%lx is not present\n", gma);
1876 goto err;
1877 }
1878 }
1879
1880 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
1881 (gma & ~I915_GTT_PAGE_MASK);
1882 trace_gma_translate(vgpu->id, "ppgtt", 0,
1883 mm->ppgtt_mm.root_entry_type, gma, gpa);
Zhi Wang2707e442016-03-28 23:23:16 +08001884 }
1885
Zhi Wang2707e442016-03-28 23:23:16 +08001886 return gpa;
1887err:
Tina Zhang695fbc02017-03-10 04:26:53 -05001888 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
Zhi Wang2707e442016-03-28 23:23:16 +08001889 return INTEL_GVT_INVALID_ADDR;
1890}
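
/*
 * Worked example for the 4-level walk above (bit layout assumed from the
 * standard gen8 48-bit PPGTT: 9 bits of index per level plus a 12-bit page
 * offset).  For gma = 0x123456789:
 *
 *	pml4 index  = (gma >> 39) & 0x1ff = 0x0
 *	pdp  index  = (gma >> 30) & 0x1ff = 0x4
 *	pde  index  = (gma >> 21) & 0x1ff = 0x11a
 *	pte  index  = (gma >> 12) & 0x1ff = 0x56
 *	page offset =  gma & 0xfff        = 0x789
 *
 * The returned gpa is the pfn taken from the last-level guest entry,
 * shifted left by I915_GTT_PAGE_SHIFT, plus the page offset.
 */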
1891
Changbin Dua143cef2018-01-30 19:19:45 +08001892static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
Zhi Wang2707e442016-03-28 23:23:16 +08001893 unsigned int off, void *p_data, unsigned int bytes)
1894{
1895 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1896 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1897 unsigned long index = off >> info->gtt_entry_size_shift;
1898 struct intel_gvt_gtt_entry e;
1899
1900 if (bytes != 4 && bytes != 8)
1901 return -EINVAL;
1902
1903 ggtt_get_guest_entry(ggtt_mm, &e, index);
1904 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
1905 bytes);
1906 return 0;
1907}
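
/*
 * Worked example (offsets are illustrative): with 8-byte GTT entries
 * (gtt_entry_size_shift == 3), a read at off = 0x10020, already adjusted by
 * gtt_start_offset in the exported wrapper below, selects
 *
 *	index = 0x10020 >> 3 = 0x2004
 *
 * i.e. the guest GGTT entry that maps graphics address
 * 0x2004 << I915_GTT_PAGE_SHIFT = 0x2004000.
 */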
1908
1909/**
1910 * intel_vgpu_emulate_ggtt_mmio_read - emulate GGTT MMIO register read
1911 * @vgpu: a vGPU
1912 * @off: register offset
1913 * @p_data: data to be returned to the guest
1914 * @bytes: data length
1915 *
1916 * This function is used to emulate a GGTT MMIO register read
1917 *
1918 * Returns:
1919 * Zero on success, error code if failed.
1920 */
Changbin Dua143cef2018-01-30 19:19:45 +08001921int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
Zhi Wang2707e442016-03-28 23:23:16 +08001922 void *p_data, unsigned int bytes)
1923{
1924 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1925 int ret;
1926
1927 if (bytes != 4 && bytes != 8)
1928 return -EINVAL;
1929
1930 off -= info->gtt_start_offset;
Changbin Dua143cef2018-01-30 19:19:45 +08001931 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
Zhi Wang2707e442016-03-28 23:23:16 +08001932 return ret;
1933}
1934
Changbin Dua143cef2018-01-30 19:19:45 +08001935static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
Zhi Wang2707e442016-03-28 23:23:16 +08001936 void *p_data, unsigned int bytes)
1937{
1938 struct intel_gvt *gvt = vgpu->gvt;
1939 const struct intel_gvt_device_info *info = &gvt->device_info;
1940 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1941 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1942 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
Changbin Du72f03d72018-01-30 19:19:48 +08001943 unsigned long gma, gfn, mfn;
Zhi Wang2707e442016-03-28 23:23:16 +08001944 struct intel_gvt_gtt_entry e, m;
Zhi Wang2707e442016-03-28 23:23:16 +08001945
1946 if (bytes != 4 && bytes != 8)
1947 return -EINVAL;
1948
Zhi Wang9556e112017-10-10 13:51:32 +08001949 gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
Zhi Wang2707e442016-03-28 23:23:16 +08001950
1951 /* the VM may configure the whole GM space when ballooning is used */
Zhao, Xinda7c281352017-02-21 15:54:56 +08001952 if (!vgpu_gmadr_is_valid(vgpu, gma))
Zhi Wang2707e442016-03-28 23:23:16 +08001953 return 0;
Zhi Wang2707e442016-03-28 23:23:16 +08001954
1955 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
1956
1957 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
1958 bytes);
Changbin Du72f03d72018-01-30 19:19:48 +08001959 m = e;
Zhi Wang2707e442016-03-28 23:23:16 +08001960
1961 if (ops->test_present(&e)) {
Hang Yuancc753fb2017-12-22 18:06:31 +08001962 gfn = ops->get_pfn(&e);
1963
1964		/* One PTE update may be issued in multiple writes, and the
1965		 * first write may not yet contain a valid gfn.
1966 */
1967 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1968 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
1969 goto out;
1970 }
1971
Changbin Du72f03d72018-01-30 19:19:48 +08001972 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
1973 if (mfn == INTEL_GVT_INVALID_ADDR) {
1974 gvt_vgpu_err("fail to populate guest ggtt entry\n");
Xiaoguang Chen359b6932017-03-21 10:54:21 +08001975			/* The guest driver may read/write the entry while it is only
1976			 * partially updated; in that case p2m translation fails, so
1977			 * point the shadow entry at a scratch page instead.
1978 */
Zhi Wang22115ce2017-10-10 14:34:11 +08001979 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
Changbin Du72f03d72018-01-30 19:19:48 +08001980 } else
1981 ops->set_pfn(&m, mfn);
1982 } else
Zhi Wang22115ce2017-10-10 14:34:11 +08001983 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
Zhi Wang2707e442016-03-28 23:23:16 +08001984
Hang Yuancc753fb2017-12-22 18:06:31 +08001985out:
Changbin Du3aff3512018-01-30 19:19:42 +08001986 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
Changbin Dua143cef2018-01-30 19:19:45 +08001987 ggtt_invalidate(gvt->dev_priv);
Zhi Wang2707e442016-03-28 23:23:16 +08001988 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
1989 return 0;
1990}
1991
1992/**
Changbin Dua143cef2018-01-30 19:19:45 +08001993 * intel_vgpu_emulate_ggtt_mmio_write - emulate GGTT MMIO register write
Zhi Wang2707e442016-03-28 23:23:16 +08001994 * @vgpu: a vGPU
1995 * @off: register offset
1996 * @p_data: data from guest write
1997 * @bytes: data length
1998 *
1999 * This function is used to emulate a GGTT MMIO register write
2000 *
2001 * Returns:
2002 * Zero on success, error code if failed.
2003 */
Changbin Dua143cef2018-01-30 19:19:45 +08002004int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2005 unsigned int off, void *p_data, unsigned int bytes)
Zhi Wang2707e442016-03-28 23:23:16 +08002006{
2007 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2008 int ret;
2009
2010 if (bytes != 4 && bytes != 8)
2011 return -EINVAL;
2012
2013 off -= info->gtt_start_offset;
Changbin Dua143cef2018-01-30 19:19:45 +08002014 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
Zhi Wang2707e442016-03-28 23:23:16 +08002015 return ret;
2016}
2017
Zhenyu Wang4fafba22017-12-18 11:58:46 +08002018int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
2019 void *p_data, unsigned int bytes)
2020{
2021 struct intel_gvt *gvt = vgpu->gvt;
2022 int ret = 0;
2023
2024 if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
2025 struct intel_vgpu_page_track *t;
2026
2027 mutex_lock(&gvt->lock);
2028
2029 t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
2030 if (t) {
2031 if (unlikely(vgpu->failsafe)) {
2032				/* remove write protection to prevent future traps */
2033 intel_vgpu_clean_page_track(vgpu, t);
2034 } else {
2035 ret = t->handler(t, pa, p_data, bytes);
2036 if (ret) {
2037 gvt_err("guest page write error %d, "
2038 "gfn 0x%lx, pa 0x%llx, "
2039 "var 0x%x, len %d\n",
2040 ret, t->gfn, pa,
2041 *(u32 *)p_data, bytes);
2042 }
2043 }
2044 }
2045 mutex_unlock(&gvt->lock);
2046 }
2047 return ret;
2048}
2049
Ping Gao3b6411c2016-11-04 13:47:35 +08002051static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2052 intel_gvt_gtt_type_t type)
Zhi Wang2707e442016-03-28 23:23:16 +08002053{
2054 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
Ping Gao3b6411c2016-11-04 13:47:35 +08002055 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
Zhenyu Wang5c352582017-11-02 17:44:52 +08002056 int page_entry_num = I915_GTT_PAGE_SIZE >>
Ping Gao3b6411c2016-11-04 13:47:35 +08002057 vgpu->gvt->device_info.gtt_entry_size_shift;
Jike Song96317392017-01-09 15:38:38 +08002058 void *scratch_pt;
Ping Gao3b6411c2016-11-04 13:47:35 +08002059 int i;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002060 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2061 dma_addr_t daddr;
Zhi Wang2707e442016-03-28 23:23:16 +08002062
Ping Gao3b6411c2016-11-04 13:47:35 +08002063 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2064 return -EINVAL;
2065
Jike Song96317392017-01-09 15:38:38 +08002066 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
Ping Gao3b6411c2016-11-04 13:47:35 +08002067 if (!scratch_pt) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002068 gvt_vgpu_err("fail to allocate scratch page\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002069 return -ENOMEM;
2070 }
2071
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002072 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
2073 4096, PCI_DMA_BIDIRECTIONAL);
2074 if (dma_mapping_error(dev, daddr)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002075 gvt_vgpu_err("fail to dmamap scratch_pt\n");
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002076 __free_page(virt_to_page(scratch_pt));
2077 return -ENOMEM;
Ping Gao3b6411c2016-11-04 13:47:35 +08002078 }
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002079 gtt->scratch_pt[type].page_mfn =
Zhenyu Wang5c352582017-11-02 17:44:52 +08002080 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
Jike Song96317392017-01-09 15:38:38 +08002081 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
Ping Gao3b6411c2016-11-04 13:47:35 +08002082 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002083 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
Ping Gao3b6411c2016-11-04 13:47:35 +08002084
2085	/* Build the tree by filling the scratch pt with entries that point
2086	 * to the next-level scratch pt or scratch page. The
2087	 * scratch_pt[type] indicates the scratch pt/scratch page used by the
2088	 * 'type' pt.
2089	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
Jike Song96317392017-01-09 15:38:38 +08002090	 * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
Ping Gao3b6411c2016-11-04 13:47:35 +08002091	 * is of type GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
2092 */
2093 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
2094 struct intel_gvt_gtt_entry se;
2095
2096 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2097 se.type = get_entry_type(type - 1);
2098 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2099
2100		/* The entry parameters, like present/writeable/cache type,
2101		 * are set to the same values as in i915's scratch page tree.
2102 */
2103 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
2104 if (type == GTT_TYPE_PPGTT_PDE_PT)
Zhi Wangc095b972017-09-14 20:39:41 +08002105 se.val64 |= PPAT_CACHED;
Ping Gao3b6411c2016-11-04 13:47:35 +08002106
2107 for (i = 0; i < page_entry_num; i++)
Jike Song96317392017-01-09 15:38:38 +08002108 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002109 }
2110
Zhi Wang2707e442016-03-28 23:23:16 +08002111 return 0;
2112}
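
/*
 * Sketch of the scratch hierarchy built via create_scratch_page_tree()
 * below (types between GTT_TYPE_PPGTT_PTE_PT and GTT_TYPE_MAX, names
 * abbreviated):
 *
 *	scratch_pt[N]   -- every entry points to --> scratch_pt[N - 1]
 *	...
 *	scratch_pt[PTE] -- left zeroed by get_zeroed_page()
 *
 * so a guest access that reaches a non-present entry is steered down the
 * scratch chain instead of touching arbitrary host memory.
 */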
2113
Ping Gao3b6411c2016-11-04 13:47:35 +08002114static int release_scratch_page_tree(struct intel_vgpu *vgpu)
Zhi Wang2707e442016-03-28 23:23:16 +08002115{
Ping Gao3b6411c2016-11-04 13:47:35 +08002116 int i;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002117 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2118 dma_addr_t daddr;
Ping Gao3b6411c2016-11-04 13:47:35 +08002119
2120 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2121 if (vgpu->gtt.scratch_pt[i].page != NULL) {
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002122 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
Zhenyu Wang5c352582017-11-02 17:44:52 +08002123 I915_GTT_PAGE_SHIFT);
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002124 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
Ping Gao3b6411c2016-11-04 13:47:35 +08002125 __free_page(vgpu->gtt.scratch_pt[i].page);
2126 vgpu->gtt.scratch_pt[i].page = NULL;
2127 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2128 }
Zhi Wang2707e442016-03-28 23:23:16 +08002129 }
Ping Gao3b6411c2016-11-04 13:47:35 +08002130
2131 return 0;
2132}
2133
2134static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2135{
2136 int i, ret;
2137
2138 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2139 ret = alloc_scratch_pages(vgpu, i);
2140 if (ret)
2141 goto err;
2142 }
2143
2144 return 0;
2145
2146err:
2147 release_scratch_page_tree(vgpu);
2148 return ret;
Zhi Wang2707e442016-03-28 23:23:16 +08002149}
2150
2151/**
2152 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2153 * @vgpu: a vGPU
2154 *
2155 * This function is used to initialize per-vGPU graphics memory virtualization
2156 * components.
2157 *
2158 * Returns:
2159 * Zero on success, error code if failed.
2160 */
2161int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2162{
2163 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
Zhi Wang2707e442016-03-28 23:23:16 +08002164
Zhi Wang7d1e5cd2017-09-29 02:47:55 +08002165 hash_init(gtt->tracked_guest_page_hash_table);
Zhi Wang2707e442016-03-28 23:23:16 +08002166 hash_init(gtt->shadow_page_hash_table);
2167
Changbin Duede9d0c2018-01-30 19:19:40 +08002168 INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
Zhi Wang2707e442016-03-28 23:23:16 +08002169 INIT_LIST_HEAD(&gtt->oos_page_list_head);
2170 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2171
Changbin Duede9d0c2018-01-30 19:19:40 +08002172 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2173 if (IS_ERR(gtt->ggtt_mm)) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002174 gvt_vgpu_err("fail to create mm for ggtt.\n");
Changbin Duede9d0c2018-01-30 19:19:40 +08002175 return PTR_ERR(gtt->ggtt_mm);
Zhi Wang2707e442016-03-28 23:23:16 +08002176 }
2177
Changbin Duede9d0c2018-01-30 19:19:40 +08002178 intel_vgpu_reset_ggtt(vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002179
Ping Gao3b6411c2016-11-04 13:47:35 +08002180 return create_scratch_page_tree(vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002181}
2182
Changbin Duede9d0c2018-01-30 19:19:40 +08002183static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002184{
2185 struct list_head *pos, *n;
2186 struct intel_vgpu_mm *mm;
2187
Changbin Duede9d0c2018-01-30 19:19:40 +08002188 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2189 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
Changbin Du1bc25852018-01-30 19:19:41 +08002190 intel_vgpu_destroy_mm(mm);
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002191 }
Changbin Duede9d0c2018-01-30 19:19:40 +08002192
2193 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2194		gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2195
2196 if (GEM_WARN_ON(!hlist_empty(vgpu->gtt.shadow_page_hash_table))) {
2197		gvt_err("Why do we still have spt not freed?\n");
2198 ppgtt_free_all_shadow_page(vgpu);
2199 }
2200}
2201
2202static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2203{
Changbin Du1bc25852018-01-30 19:19:41 +08002204 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
Changbin Duede9d0c2018-01-30 19:19:40 +08002205 vgpu->gtt.ggtt_mm = NULL;
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002206}
2207
Zhi Wang2707e442016-03-28 23:23:16 +08002208/**
2209 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2210 * @vgpu: a vGPU
2211 *
2212 * This function is used to clean up per-vGPU graphics memory virtualization
2213 * components.
2217 */
2218void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2219{
Changbin Duede9d0c2018-01-30 19:19:40 +08002220 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2221 intel_vgpu_destroy_ggtt_mm(vgpu);
Ping Gao3b6411c2016-11-04 13:47:35 +08002222 release_scratch_page_tree(vgpu);
Zhi Wang2707e442016-03-28 23:23:16 +08002223}
2224
2225static void clean_spt_oos(struct intel_gvt *gvt)
2226{
2227 struct intel_gvt_gtt *gtt = &gvt->gtt;
2228 struct list_head *pos, *n;
2229 struct intel_vgpu_oos_page *oos_page;
2230
2231 WARN(!list_empty(&gtt->oos_page_use_list_head),
2232 "someone is still using oos page\n");
2233
2234 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2235 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2236 list_del(&oos_page->list);
2237 kfree(oos_page);
2238 }
2239}
2240
2241static int setup_spt_oos(struct intel_gvt *gvt)
2242{
2243 struct intel_gvt_gtt *gtt = &gvt->gtt;
2244 struct intel_vgpu_oos_page *oos_page;
2245 int i;
2246 int ret;
2247
2248 INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2249 INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2250
2251 for (i = 0; i < preallocated_oos_pages; i++) {
2252 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2253 if (!oos_page) {
Zhi Wang2707e442016-03-28 23:23:16 +08002254 ret = -ENOMEM;
2255 goto fail;
2256 }
2257
2258 INIT_LIST_HEAD(&oos_page->list);
2259 INIT_LIST_HEAD(&oos_page->vm_list);
2260 oos_page->id = i;
2261 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2262 }
2263
2264 gvt_dbg_mm("%d oos pages preallocated\n", i);
2265
2266 return 0;
2267fail:
2268 clean_spt_oos(gvt);
2269 return ret;
2270}
2271
2272/**
2273 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2274 * @vgpu: a vGPU
2275 * @pdps: guest pdps
2277 *
2278 * This function is used to find a PPGTT mm object from mm object pool
2279 *
2280 * Returns:
2281 * pointer to mm object on success, NULL if failed.
2282 */
2283struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
Changbin Duede9d0c2018-01-30 19:19:40 +08002284 u64 pdps[])
Zhi Wang2707e442016-03-28 23:23:16 +08002285{
Zhi Wang2707e442016-03-28 23:23:16 +08002286 struct intel_vgpu_mm *mm;
Changbin Duede9d0c2018-01-30 19:19:40 +08002287 struct list_head *pos;
Zhi Wang2707e442016-03-28 23:23:16 +08002288
Changbin Duede9d0c2018-01-30 19:19:40 +08002289 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2290 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
Zhi Wang2707e442016-03-28 23:23:16 +08002291
Changbin Duede9d0c2018-01-30 19:19:40 +08002292 switch (mm->ppgtt_mm.root_entry_type) {
2293 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2294 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
Zhi Wang2707e442016-03-28 23:23:16 +08002295 return mm;
Changbin Duede9d0c2018-01-30 19:19:40 +08002296 break;
2297 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2298 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2299 sizeof(mm->ppgtt_mm.guest_pdps)))
Zhi Wang2707e442016-03-28 23:23:16 +08002300 return mm;
Changbin Duede9d0c2018-01-30 19:19:40 +08002301 break;
2302 default:
2303 GEM_BUG_ON(1);
Zhi Wang2707e442016-03-28 23:23:16 +08002304 }
2305 }
2306 return NULL;
2307}
2308
2309/**
Changbin Due6e9c462018-01-30 19:19:46 +08002310 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
Zhi Wang2707e442016-03-28 23:23:16 +08002311 * @vgpu: a vGPU
Changbin Duede9d0c2018-01-30 19:19:40 +08002312 * @root_entry_type: ppgtt root entry type
2313 * @pdps: guest pdps
Zhi Wang2707e442016-03-28 23:23:16 +08002314 *
Changbin Due6e9c462018-01-30 19:19:46 +08002315 * This function is used to find or create a PPGTT mm object from a guest.
Zhi Wang2707e442016-03-28 23:23:16 +08002316 *
2317 * Returns:
2318 * Pointer to the mm object on success, ERR_PTR() on failure.
2319 */
Changbin Due6e9c462018-01-30 19:19:46 +08002320struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
Changbin Duede9d0c2018-01-30 19:19:40 +08002321 intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
Zhi Wang2707e442016-03-28 23:23:16 +08002322{
Zhi Wang2707e442016-03-28 23:23:16 +08002323 struct intel_vgpu_mm *mm;
2324
Changbin Duede9d0c2018-01-30 19:19:40 +08002325 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
Zhi Wang2707e442016-03-28 23:23:16 +08002326 if (mm) {
Changbin Du1bc25852018-01-30 19:19:41 +08002327 intel_vgpu_mm_get(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08002328 } else {
Changbin Duede9d0c2018-01-30 19:19:40 +08002329 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
Changbin Due6e9c462018-01-30 19:19:46 +08002330 if (IS_ERR(mm))
Tina Zhang695fbc02017-03-10 04:26:53 -05002331 gvt_vgpu_err("fail to create mm\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002332 }
Changbin Due6e9c462018-01-30 19:19:46 +08002333 return mm;
Zhi Wang2707e442016-03-28 23:23:16 +08002334}
2335
2336/**
Changbin Due6e9c462018-01-30 19:19:46 +08002337 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
Zhi Wang2707e442016-03-28 23:23:16 +08002338 * @vgpu: a vGPU
Changbin Duede9d0c2018-01-30 19:19:40 +08002339 * @pdps: guest pdps
Zhi Wang2707e442016-03-28 23:23:16 +08002340 *
Changbin Due6e9c462018-01-30 19:19:46 +08002341 * This function is used to find a PPGTT mm object from a guest and destroy it.
Zhi Wang2707e442016-03-28 23:23:16 +08002342 *
2343 * Returns:
2344 * Zero on success, negative error code if failed.
2345 */
Changbin Due6e9c462018-01-30 19:19:46 +08002346int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
Zhi Wang2707e442016-03-28 23:23:16 +08002347{
Zhi Wang2707e442016-03-28 23:23:16 +08002348 struct intel_vgpu_mm *mm;
2349
Changbin Duede9d0c2018-01-30 19:19:40 +08002350 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
Zhi Wang2707e442016-03-28 23:23:16 +08002351 if (!mm) {
Tina Zhang695fbc02017-03-10 04:26:53 -05002352 gvt_vgpu_err("fail to find ppgtt instance.\n");
Zhi Wang2707e442016-03-28 23:23:16 +08002353 return -EINVAL;
2354 }
Changbin Du1bc25852018-01-30 19:19:41 +08002355 intel_vgpu_mm_put(mm);
Zhi Wang2707e442016-03-28 23:23:16 +08002356 return 0;
2357}
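
/*
 * Hypothetical flow (the call sites are assumptions, not taken from this
 * file): a guest announcing a new set of page-directory pointers would
 * typically be handled with a matching get/put pair on the same pdps:
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	// ...guest uses the PPGTT...
 *	ret = intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 */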
2358
2359/**
2360 * intel_gvt_init_gtt - initialize mm components of a GVT device
2361 * @gvt: GVT device
2362 *
2363 * This function is called at the initialization stage, to initialize
2364 * the mm components of a GVT device.
2365 *
2366 * Returns:
2367 * zero on success, negative error code if failed.
2368 */
2369int intel_gvt_init_gtt(struct intel_gvt *gvt)
2370{
2371 int ret;
Jike Song96317392017-01-09 15:38:38 +08002372 void *page;
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002373 struct device *dev = &gvt->dev_priv->drm.pdev->dev;
2374 dma_addr_t daddr;
Zhi Wang2707e442016-03-28 23:23:16 +08002375
2376 gvt_dbg_core("init gtt\n");
2377
Xu Hane3476c02017-03-29 10:13:59 +08002378 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
2379 || IS_KABYLAKE(gvt->dev_priv)) {
Zhi Wang2707e442016-03-28 23:23:16 +08002380 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2381 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
Zhi Wang2707e442016-03-28 23:23:16 +08002382 } else {
2383 return -ENODEV;
2384 }
2385
Jike Song96317392017-01-09 15:38:38 +08002386 page = (void *)get_zeroed_page(GFP_KERNEL);
2387 if (!page) {
Ping Gaod650ac02016-12-08 10:14:48 +08002388 gvt_err("fail to allocate scratch ggtt page\n");
2389 return -ENOMEM;
2390 }
2391
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002392 daddr = dma_map_page(dev, virt_to_page(page), 0,
2393 4096, PCI_DMA_BIDIRECTIONAL);
2394 if (dma_mapping_error(dev, daddr)) {
2395 gvt_err("fail to dmamap scratch ggtt page\n");
2396 __free_page(virt_to_page(page));
2397 return -ENOMEM;
Ping Gaod650ac02016-12-08 10:14:48 +08002398 }
Zhi Wang22115ce2017-10-10 14:34:11 +08002399
2400 gvt->gtt.scratch_page = virt_to_page(page);
2401 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
Ping Gaod650ac02016-12-08 10:14:48 +08002402
Zhi Wang2707e442016-03-28 23:23:16 +08002403 if (enable_out_of_sync) {
2404 ret = setup_spt_oos(gvt);
2405 if (ret) {
2406 gvt_err("fail to initialize SPT oos\n");
Zhou, Wenjia0de98702017-07-04 15:47:00 +08002407 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
Zhi Wang22115ce2017-10-10 14:34:11 +08002408 __free_page(gvt->gtt.scratch_page);
Zhi Wang2707e442016-03-28 23:23:16 +08002409 return ret;
2410 }
2411 }
Changbin Duede9d0c2018-01-30 19:19:40 +08002412 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
Zhi Wang2707e442016-03-28 23:23:16 +08002413 return 0;
2414}
2415
2416/**
2417 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2418 * @gvt: GVT device
2419 *
2420 * This function is called at the driver unloading stage, to clean up the
2421 * mm components of a GVT device.
2422 *
2423 */
2424void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2425{
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002426 struct device *dev = &gvt->dev_priv->drm.pdev->dev;
Zhi Wang22115ce2017-10-10 14:34:11 +08002427 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
Zhi Wang9556e112017-10-10 13:51:32 +08002428 I915_GTT_PAGE_SHIFT);
Chuanxiao Dong5de6bd42017-02-09 11:37:11 +08002429
2430 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2431
Zhi Wang22115ce2017-10-10 14:34:11 +08002432 __free_page(gvt->gtt.scratch_page);
Ping Gaod650ac02016-12-08 10:14:48 +08002433
Zhi Wang2707e442016-03-28 23:23:16 +08002434 if (enable_out_of_sync)
2435 clean_spt_oos(gvt);
2436}
Ping Gaod650ac02016-12-08 10:14:48 +08002437
2438/**
2439 * intel_vgpu_reset_ggtt - reset the GGTT entry
2440 * @vgpu: a vGPU
2441 *
2442 * This function is called at the vGPU create stage
2443 * to reset all the GGTT entries.
2444 *
2445 */
2446void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2447{
2448 struct intel_gvt *gvt = vgpu->gvt;
Zhenyu Wang5ad59bf2017-04-12 16:24:57 +08002449 struct drm_i915_private *dev_priv = gvt->dev_priv;
Changbin Dub0c766b2018-01-30 19:19:43 +08002450 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2451 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
Ping Gaod650ac02016-12-08 10:14:48 +08002452 u32 index;
Ping Gaod650ac02016-12-08 10:14:48 +08002453 u32 num_entries;
Ping Gaod650ac02016-12-08 10:14:48 +08002454
Changbin Dub0c766b2018-01-30 19:19:43 +08002455 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2456 pte_ops->set_present(&entry);
Ping Gaod650ac02016-12-08 10:14:48 +08002457
2458 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2459 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
Changbin Dub0c766b2018-01-30 19:19:43 +08002460 while (num_entries--)
2461 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
Ping Gaod650ac02016-12-08 10:14:48 +08002462
2463 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2464 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
Changbin Dub0c766b2018-01-30 19:19:43 +08002465 while (num_entries--)
2466 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
Zhenyu Wang5ad59bf2017-04-12 16:24:57 +08002467
Changbin Dua143cef2018-01-30 19:19:45 +08002468 ggtt_invalidate(dev_priv);
Ping Gaod650ac02016-12-08 10:14:48 +08002469}
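
/*
 * Sizing sketch (illustrative numbers only): a vGPU with a 128 MiB aperture
 * and a 384 MiB hidden range resets
 *
 *	(128 MiB >> PAGE_SHIFT) + (384 MiB >> PAGE_SHIFT) = 32768 + 98304
 *
 * GGTT entries to the scratch mfn in the two loops above.
 */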
Changbin Dub6115812017-01-13 11:15:57 +08002470
2471/**
2472 * intel_vgpu_reset_gtt - reset all GTT related status
2473 * @vgpu: a vGPU
Changbin Dub6115812017-01-13 11:15:57 +08002474 *
2475 * This function is called from the vfio core to reset all
2476 * GTT related status, including GGTT, PPGTT, scratch page.
2477 *
2478 */
Chuanxiao Dong4d3e67b2017-08-04 13:08:59 +08002479void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
Changbin Dub6115812017-01-13 11:15:57 +08002480{
Ping Gaoda9cc8d2017-02-21 15:52:56 +08002481 /* Shadow pages are only created when there is no page
2482 * table tracking data, so remove page tracking data after
2483 * removing the shadow pages.
2484 */
Changbin Duede9d0c2018-01-30 19:19:40 +08002485 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
Changbin Dub6115812017-01-13 11:15:57 +08002486 intel_vgpu_reset_ggtt(vgpu);
Changbin Dub6115812017-01-13 11:15:57 +08002487}