/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"

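/*
 * Overview (a summary of what follows, not upstream text): this file
 * implements trap-and-emulate handling of guest MMIO and GGTT accesses
 * for GVT-g vGPUs. Guest physical addresses are translated to offsets
 * within the virtual BAR0, then dispatched to per-register handlers,
 * the GTT emulation path, or write-protected guest page handlers.
 */
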
/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of the given GPA
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
			  ~GENMASK(3, 0);
	return gpa - gttmmio_gpa;
}
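
/*
 * Worked example (illustrative values, not from this file): if the guest
 * has programmed BAR0 in its virtual PCI config space to 0xf0000000, the
 * low four bits are BAR flag bits and are masked off by ~GENMASK(3, 0),
 * so a guest access to GPA 0xf0002030 translates to MMIO offset
 * 0xf0002030 - 0xf0000000 = 0x2030.
 */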

#define reg_is_mmio(gvt, reg) \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg) \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
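
/*
 * Layout note (a sketch of the BAR0 layout these macros assume): the
 * register MMIO range and the GGTT share the same BAR, with the GGTT
 * starting at device_info.gtt_start_offset. The two macros above route a
 * trapped offset to either the register-emulation path or the GTT path.
 */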

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_vgpu *vgpu = __vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

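	/*
	 * A read that hits a write-protected guest page (e.g. a guest
	 * PPGTT page table that GVT-g shadows) needs no register
	 * emulation: only writes must be trapped to keep the shadow in
	 * sync, so the read is satisfied straight from guest memory.
	 */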
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
					p_data, bytes);
			if (ret) {
				gvt_err("vgpu%d: guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

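	/*
	 * GGTT entries are accessed through the same BAR as the registers;
	 * in-range offsets are routed to the GTT emulation path. Entries
	 * are dword or qword sized, hence the size and alignment checks
	 * below.
	 */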
	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
			p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack) {
		gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
			vgpu->id, offset, bytes, *(u32 *)p_data);

		if (offset == 0x206c) {
			gvt_err("------------------------------------------\n");
			gvt_err("vgpu%d: likely triggers a gfx reset\n",
				vgpu->id);
			gvt_err("------------------------------------------\n");
			vgpu->mmio.disable_warn_untrack = true;
		}
	}

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}
		ret = mmio->read(vgpu, offset, p_data, bytes);
	} else
		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

	if (ret)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: failed to emulate MMIO read %08x len %d\n",
		vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
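
/*
 * Usage sketch (hypothetical): a hypervisor backend that traps a 4-byte
 * guest MMIO read could forward it here and hand the result back to the
 * vCPU. The surrounding trap plumbing is illustrative, not part of this
 * file:
 *
 *	u32 val = 0;
 *
 *	if (intel_vgpu_emulate_mmio_read(vgpu, gpa, &val, 4))
 *		return -EIO;
 *	// return "val" to the guest's trapped load instruction
 */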

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_vgpu *vgpu = __vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	u32 old_vreg = 0, old_sreg = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

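	/*
	 * Unlike the read path, a write that hits a write-protected guest
	 * page must go through gp->handler, which emulates the write and
	 * keeps the corresponding shadow page table entry in sync.
	 */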
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret) {
				gvt_err("vgpu%d: guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
			p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack)
		gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
			vgpu->id, offset, bytes, *(u32 *)p_data);

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		u64 ro_mask = mmio->ro_mask;

		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}

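		/*
		 * Mode-control registers treat the upper 16 bits of a
		 * write as a mask selecting which of the lower 16 bits
		 * take effect; snapshot the old values so the unmasked
		 * bits can be restored after the handler runs.
		 */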
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (!ro_mask) {
			ret = mmio->write(vgpu, offset, p_data, bytes);
		} else {
			/* protect the RO bits, as hardware does */
			u64 data = 0;

			/* all register bits are RO */
			if (ro_mask == ~(u64)0) {
				gvt_err("vgpu%d: attempted to write RO reg %x\n",
					vgpu->id, offset);
				ret = 0;
				goto out;
			}
			/* keep the RO bits in the virtual register */
			memcpy(&data, p_data, bytes);
			data &= ~mmio->ro_mask;
			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
			ret = mmio->write(vgpu, offset, &data, bytes);
		}
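		/*
		 * Worked example (illustrative mask): with ro_mask
		 * 0xffff0000 and a guest write of 0x12345678 to a vreg
		 * holding 0xaaaa0000, the value handed to mmio->write is
		 * (0x12345678 & ~ro_mask) | (0xaaaa0000 & ro_mask)
		 * = 0x00005678 | 0xaaaa0000 = 0xaaaa5678.
		 */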

		/* upper 16 bits of mode ctl regs mask which lower bits change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
	} else
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
				bytes);
	if (ret)
		goto err;
out:
	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: failed to emulate MMIO write %08x len %d\n",
		vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
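
/*
 * Usage sketch (hypothetical, mirroring the read example): a trapped
 * guest store is forwarded the same way. For a mode-control register,
 * the guest typically writes a masked value (_MASKED_BIT_ENABLE-style,
 * with the upper 16 bits selecting which lower bits to change):
 *
 *	u32 val = (1 << 16) | 1;	// unmask bit 0, set bit 0
 *
 *	if (intel_vgpu_emulate_mmio_write(vgpu, gpa, &val, 4))
 *		return -EIO;
 */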