/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

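/*
 * An MMIO register that must be switched between the host and a vGPU:
 * the owning ring, the register, an optional mask for masked-write
 * registers (shifted into the upper 16 bits on write), whether the
 * register is also saved/restored by the logical ring context, and
 * the cached host value captured at load time.
 */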
struct render_mmio {
	int ring_id;
	i915_reg_t reg;
	u32 mask;
	bool in_context;
	u32 value;
};

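/* Render MMIO switched around vGPU workloads on Gen8 (Broadwell). */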
static struct render_mmio gen8_render_mmio_list[] = {
	{RCS, _MMIO(0x229c), 0xffff, false},
	{RCS, _MMIO(0x2248), 0x0, false},
	{RCS, _MMIO(0x2098), 0x0, false},
	{RCS, _MMIO(0x20c0), 0xffff, true},
	{RCS, _MMIO(0x24d0), 0, false},
	{RCS, _MMIO(0x24d4), 0, false},
	{RCS, _MMIO(0x24d8), 0, false},
	{RCS, _MMIO(0x24dc), 0, false},
	{RCS, _MMIO(0x7004), 0xffff, true},
	{RCS, _MMIO(0x7008), 0xffff, true},
	{RCS, _MMIO(0x7000), 0xffff, true},
	{RCS, _MMIO(0x7010), 0xffff, true},
	{RCS, _MMIO(0x7300), 0xffff, true},
	{RCS, _MMIO(0x83a4), 0xffff, true},

	{BCS, _MMIO(0x2229c), 0xffff, false},
	{BCS, _MMIO(0x2209c), 0xffff, false},
	{BCS, _MMIO(0x220c0), 0xffff, false},
	{BCS, _MMIO(0x22098), 0x0, false},
	{BCS, _MMIO(0x22028), 0x0, false},
};

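/* Gen9 (Skylake) superset: the Gen8 entries plus extra RCS, VCS2 and VECS registers. */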
static struct render_mmio gen9_render_mmio_list[] = {
	{RCS, _MMIO(0x229c), 0xffff, false},
	{RCS, _MMIO(0x2248), 0x0, false},
	{RCS, _MMIO(0x2098), 0x0, false},
	{RCS, _MMIO(0x20c0), 0xffff, true},
	{RCS, _MMIO(0x24d0), 0, false},
	{RCS, _MMIO(0x24d4), 0, false},
	{RCS, _MMIO(0x24d8), 0, false},
	{RCS, _MMIO(0x24dc), 0, false},
	{RCS, _MMIO(0x7004), 0xffff, true},
	{RCS, _MMIO(0x7008), 0xffff, true},
	{RCS, _MMIO(0x7000), 0xffff, true},
	{RCS, _MMIO(0x7010), 0xffff, true},
	{RCS, _MMIO(0x7300), 0xffff, true},
	{RCS, _MMIO(0x83a4), 0xffff, true},

	{RCS, _MMIO(0x40e0), 0, false},
	{RCS, _MMIO(0x40e4), 0, false},
	{RCS, _MMIO(0x2580), 0xffff, true},
	{RCS, _MMIO(0x7014), 0xffff, true},
	{RCS, _MMIO(0x20ec), 0xffff, false},
	{RCS, _MMIO(0xb118), 0, false},
	{RCS, _MMIO(0xe100), 0xffff, true},
	{RCS, _MMIO(0xe180), 0xffff, true},
	{RCS, _MMIO(0xe184), 0xffff, true},
	{RCS, _MMIO(0xe188), 0xffff, true},
	{RCS, _MMIO(0xe194), 0xffff, true},
	{RCS, _MMIO(0x4de0), 0, false},
	{RCS, _MMIO(0x4de4), 0, false},
	{RCS, _MMIO(0x4de8), 0, false},
	{RCS, _MMIO(0x4dec), 0, false},
	{RCS, _MMIO(0x4df0), 0, false},
	{RCS, _MMIO(0x4df4), 0, false},

	{BCS, _MMIO(0x2229c), 0xffff, false},
	{BCS, _MMIO(0x2209c), 0xffff, false},
	{BCS, _MMIO(0x220c0), 0xffff, false},
	{BCS, _MMIO(0x22098), 0x0, false},
	{BCS, _MMIO(0x22028), 0x0, false},

	{VCS2, _MMIO(0x1c028), 0xffff, false},

	{VECS, _MMIO(0x1a028), 0xffff, false},
};

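/*
 * Host copies of the per-engine MOCS control registers and the 32 L3
 * control registers, saved while a vGPU's values are programmed.
 */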
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
static u32 gen9_render_mocs_L3[32];

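/*
 * If a TLB invalidation request is pending for this vGPU on the given
 * ring, write the ring's invalidation register and wait for hardware
 * to clear it. Forcewake is held so the device cannot drop into RC6
 * mid-invalidation.
 */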
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	enum forcewake_domains fw;
	i915_reg_t reg;
	u32 regs[] = {
		[RCS] = 0x4260,
		[VCS] = 0x4264,
		[VCS2] = 0x4268,
		[BCS] = 0x426c,
		[VECS] = 0x4270,
	};

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
		return;

	reg = _MMIO(regs[ring_id]);

	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a forcewake when invalidating RCS TLB caches,
	 * otherwise the device can enter RC6 state and interrupt the
	 * invalidation process.
	 */
	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (ring_id == RCS && IS_SKYLAKE(dev_priv))
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(dev_priv, fw);

	I915_WRITE_FW(reg, 0x1);

	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
		gvt_err("timeout in invalidating ring (%d) tlb\n", ring_id);

	intel_uncore_forcewake_put(dev_priv, fw);

	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}

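/*
 * Save the host's MOCS and L3 control register values, then program
 * the vGPU's values into the hardware (Skylake only).
 */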
static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	i915_reg_t offset, l3_offset;
	u32 regs[] = {
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int i;

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!IS_SKYLAKE(dev_priv))
		return;

	offset.reg = regs[ring_id];
	for (i = 0; i < 64; i++) {
		gen9_render_mocs[ring_id][i] = I915_READ(offset);
		I915_WRITE(offset, vgpu_vreg(vgpu, offset));
		POSTING_READ(offset);
		offset.reg += 4;
	}

	if (ring_id == RCS) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < 32; i++) {
			gen9_render_mocs_L3[i] = I915_READ(l3_offset);
			I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
			POSTING_READ(l3_offset);
			l3_offset.reg += 4;
		}
	}
}

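/*
 * Capture the vGPU's live MOCS and L3 control values back into its
 * virtual registers, then restore the saved host values (Skylake only).
 */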
static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	i915_reg_t offset, l3_offset;
	u32 regs[] = {
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int i;

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!IS_SKYLAKE(dev_priv))
		return;

	offset.reg = regs[ring_id];
	for (i = 0; i < 64; i++) {
		vgpu_vreg(vgpu, offset) = I915_READ(offset);
		I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
		POSTING_READ(offset);
		offset.reg += 4;
	}

	if (ring_id == RCS) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < 32; i++) {
			vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
			I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
			POSTING_READ(l3_offset);
			l3_offset.reg += 4;
		}
	}
}

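/**
 * intel_gvt_load_render_mmio - load a vGPU's render MMIO state
 * @vgpu: a vGPU
 * @ring_id: ring ID
 *
 * Save the host values of the ring's render registers, program the
 * vGPU's values (applying the write mask where one is defined), then
 * flush any pending TLB invalidation for the ring.
 */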
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct render_mmio *mmio;
	u32 v;
	int i, array_size;

	if (IS_SKYLAKE(dev_priv)) {
		mmio = gen9_render_mmio_list;
		array_size = ARRAY_SIZE(gen9_render_mmio_list);
		load_mocs(vgpu, ring_id);
	} else {
		mmio = gen8_render_mmio_list;
		array_size = ARRAY_SIZE(gen8_render_mmio_list);
	}

	for (i = 0; i < array_size; i++, mmio++) {
		if (mmio->ring_id != ring_id)
			continue;

		mmio->value = I915_READ(mmio->reg);
		if (mmio->mask)
			v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
		else
			v = vgpu_vreg(vgpu, mmio->reg);

		I915_WRITE(mmio->reg, v);
		POSTING_READ(mmio->reg);

		gvt_dbg_render("load reg %x old %x new %x\n",
				i915_mmio_reg_offset(mmio->reg),
				mmio->value, v);
	}
	handle_tlb_pending_event(vgpu, ring_id);
}

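/**
 * intel_gvt_restore_render_mmio - restore host render MMIO state
 * @vgpu: a vGPU
 * @ring_id: ring ID
 *
 * Snapshot the vGPU's live register values into its virtual MMIO
 * space, then write back the host values saved at load time.
 */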
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct render_mmio *mmio;
	u32 v;
	int i, array_size;

	if (IS_SKYLAKE(dev_priv)) {
		mmio = gen9_render_mmio_list;
		array_size = ARRAY_SIZE(gen9_render_mmio_list);
		restore_mocs(vgpu, ring_id);
	} else {
		mmio = gen8_render_mmio_list;
		array_size = ARRAY_SIZE(gen8_render_mmio_list);
	}

	for (i = 0; i < array_size; i++, mmio++) {
		if (mmio->ring_id != ring_id)
			continue;

		vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);

		if (mmio->mask) {
			vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
			v = mmio->value | (mmio->mask << 16);
		} else
			v = mmio->value;

		I915_WRITE(mmio->reg, v);
		POSTING_READ(mmio->reg);

		gvt_dbg_render("restore reg %x old %x new %x\n",
				i915_mmio_reg_offset(mmio->reg),
				mmio->value, v);
	}
}