/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

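/*
 * Descriptive note: the host-wide state below records which hypervisor
 * back-end (Xen or KVM) is active and points to its service table
 * (struct intel_gvt_mpt, see hypercall.h and mpt.h), through which the
 * hypervisor-specific MPT modules are called.
 */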
struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
	struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool resetting;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
};

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

#define INTEL_GVT_MMIO_HASH_BITS 9

struct intel_gvt_mmio {
	u32 *mmio_attribute;
	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

struct intel_gvt_opregion {
	void *opregion_va;
	u32 opregion_pa;
};

struct intel_gvt {
	struct mutex lock;
	bool initialized;

	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_opregion opregion;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;
};

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
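/*
 * Usage sketch: queue a request for the GVT service thread and wake it,
 * e.g. to have it emulate a vblank for the vGPUs:
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
 */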

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)
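/*
 * GM address layout implied by the macros above (a summary, not new
 * definitions): the CPU-mappable aperture spans
 * [0, gvt_aperture_sz(gvt) - 1] and the hidden range follows at
 * [gvt_aperture_sz(gvt), gvt_ggtt_gm_sz(gvt) - 1].
 */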

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__s32 primary;
	__u64 vgpu_id;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow registers */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
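/*
 * Usage sketch (SOME_REG is an illustrative placeholder, not a register
 * defined here): vreg is the guest-visible (virtual) copy, sreg the
 * shadow copy.
 *
 *	u32 val = vgpu_vreg(vgpu, SOME_REG);	- read the virtual value
 *	vgpu_sreg(vgpu, SOME_REG) = val;	- update the shadow value
 */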

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
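/*
 * Iteration sketch (do_something() is a hypothetical callee):
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		do_something(vgpu);
 */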

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
		u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bit 31 - bit 4,
		 * leave the bit 3 - bit 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	}
}
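/*
 * Note on the masking above: in a PCI memory BAR the low bits are read-only
 * flag bits (memory/IO indicator, type, prefetchable), so only the address
 * bits 31:4 are taken from the value written by the guest.
 */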

struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_creation_params *param);

void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
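/*
 * Creation sketch (the sizes below are illustrative values, not defaults
 * defined in this header):
 *
 *	struct intel_vgpu_creation_params param = {
 *		.handle = handle,
 *		.low_gm_sz = 64,	- aperture share, in MB
 *		.high_gm_sz = 448,	- hidden GM share, in MB
 *		.fence_sz = 4,
 *	};
 *	struct intel_vgpu *vgpu = intel_gvt_create_vgpu(gvt, &param);
 */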

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);
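/*
 * Translation sketch (error handling elided): convert a guest GM address
 * into the corresponding host GM address before it is used on hardware.
 *
 *	u64 h_addr;
 *
 *	if (intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr))
 *		return -EINVAL;
 */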

int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);

#include "mpt.h"

#endif