/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"

#define GVT_MAX_VGPU 8

enum {
        INTEL_GVT_HYPERVISOR_XEN = 0,
        INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
        bool initialized;
        int hypervisor_type;
        struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
        u32 max_support_vgpus;
        u32 cfg_space_size;
        u32 mmio_size;
        u32 mmio_bar;
        unsigned long msi_cap_offset;
        u32 gtt_start_offset;
        u32 gtt_entry_size;
        u32 gtt_entry_size_shift;
        int gmadr_bytes_in_cmd;
        u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
        u64 aperture_sz;
        u64 hidden_sz;
        struct drm_mm_node low_gm_node;
        struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
        struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
        u32 base;
        u32 size;
};

struct intel_vgpu_mmio {
        void *vreg;
        void *sreg;
        bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
        u64 size;
        bool tracked;
};

struct intel_vgpu_cfg_space {
        unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
        struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
        bool irq_warn_once[INTEL_GVT_EVENT_MAX];
        DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
                       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
        void *va;
        u32 gfn[INTEL_GVT_OPREGION_PAGES];
        struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
        struct intel_vgpu_i2c_edid i2c_edid;
        struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
        struct intel_vgpu_sbi sbi;
};

struct intel_vgpu {
        struct intel_gvt *gvt;
        int id;
        unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
        bool active;
        bool resetting;
        void *sched_data;

        struct intel_vgpu_fence fence;
        struct intel_vgpu_gm gm;
        struct intel_vgpu_cfg_space cfg_space;
        struct intel_vgpu_mmio mmio;
        struct intel_vgpu_irq irq;
        struct intel_vgpu_gtt gtt;
        struct intel_vgpu_opregion opregion;
        struct intel_vgpu_display display;
        struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
        struct list_head workload_q_head[I915_NUM_ENGINES];
        struct kmem_cache *workloads;
        atomic_t running_workload_num;
        DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
        struct i915_gem_context *shadow_ctx;
        struct notifier_block shadow_ctx_notifier_block;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
        struct {
                struct mdev_device *mdev;
                struct vfio_region *region;
                int num_regions;
                struct eventfd_ctx *intx_trigger;
                struct eventfd_ctx *msi_trigger;
                struct rb_root cache;
                struct mutex cache_lock;
                struct notifier_block iommu_notifier;
                struct notifier_block group_notifier;
                struct kvm *kvm;
                struct work_struct release_work;
                atomic_t released;
        } vdev;
#endif
};

struct intel_gvt_gm {
        unsigned long vgpu_allocated_low_gm_size;
        unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
        unsigned long vgpu_allocated_fence_num;
};

#define INTEL_GVT_MMIO_HASH_BITS 9

struct intel_gvt_mmio {
        u32 *mmio_attribute;
        DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};

struct intel_gvt_firmware {
        void *cfg_space;
        void *mmio;
        bool firmware_loaded;
};

struct intel_gvt_opregion {
        void __iomem *opregion_va;
        u32 opregion_pa;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
        char name[16];
        unsigned int max_instance;
        unsigned int avail_instance;
        unsigned int low_gm_size;
        unsigned int high_gm_size;
        unsigned int fence;
};

struct intel_gvt {
        struct mutex lock;
        struct drm_i915_private *dev_priv;
        struct idr vgpu_idr;    /* vGPU IDR pool */

        struct intel_gvt_device_info device_info;
        struct intel_gvt_gm gm;
        struct intel_gvt_fence fence;
        struct intel_gvt_mmio mmio;
        struct intel_gvt_firmware firmware;
        struct intel_gvt_irq irq;
        struct intel_gvt_gtt gtt;
        struct intel_gvt_opregion opregion;
        struct intel_gvt_workload_scheduler scheduler;
        DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
        struct intel_vgpu_type *types;
        unsigned int num_types;

        struct task_struct *service_thread;
        wait_queue_head_t service_thread_wq;
        unsigned long service_request;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
        return i915->gvt;
}

enum {
        INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
                int service)
{
        set_bit(service, (void *)&gvt->service_request);
        wake_up(&gvt->service_thread_wq);
}
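
/*
 * Usage sketch (illustrative, not taken from this file): a caller that
 * wants the service thread to emulate a vblank for every active vGPU
 * sets the corresponding request bit and wakes the thread, e.g. from a
 * periodic timer callback:
 *
 *        intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
 *
 * The service thread is assumed to test-and-clear the bit in
 * gvt->service_request before handling the request; the handler itself
 * lives outside this header.
 */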

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* GM space and fence resources reserved for host usage */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)      (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt)       (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
        ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)        (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
                                     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
                                    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
                                   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)   ((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)     ((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)       ((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
        (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
        (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
        (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
        (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
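
/*
 * Usage sketch (illustrative only): a vGPU's guest-visible graphics
 * memory ranges follow directly from the macros above, e.g.:
 *
 *        u64 ap_base = vgpu_aperture_gmadr_base(vgpu);
 *        u64 ap_end  = vgpu_aperture_gmadr_end(vgpu);
 *        u64 hi_base = vgpu_hidden_gmadr_base(vgpu);
 *        u64 hi_end  = vgpu_hidden_gmadr_end(vgpu);
 *
 * while vgpu_aperture_pa_base() gives the host physical address that
 * backs the mappable (aperture) part of that range.
 */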

struct intel_vgpu_creation_params {
        __u64 handle;
        __u64 low_gm_sz;  /* in MB */
        __u64 high_gm_sz; /* in MB */
        __u64 fence_sz;
        __s32 primary;
        __u64 vgpu_id;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
                              struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
                            u32 fence, u64 value);
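
/*
 * A minimal sketch of resource allocation (the values are illustrative,
 * not defaults): GM sizes are specified in MB, as noted in the struct
 * above, and fence_sz counts fence registers:
 *
 *        struct intel_vgpu_creation_params param = {
 *                .low_gm_sz  = 64,
 *                .high_gm_sz = 448,
 *                .fence_sz   = 4,
 *        };
 *        int ret = intel_vgpu_alloc_resource(vgpu, &param);
 *
 * A non-zero return value indicates the request could not be satisfied.
 */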

/* Macros for easily accessing vGPU virtual/shadow registers */
#define vgpu_vreg(vgpu, reg) \
        (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
        (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
        (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
        (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
        (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
        (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
        (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
        (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
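
/*
 * Usage sketch (illustrative only): MMIO emulation handlers read and
 * update the per-vGPU virtual register file through these accessors,
 * roughly:
 *
 *        u32 old = vgpu_vreg(vgpu, offset);
 *        vgpu_vreg(vgpu, offset) = new_val;
 *
 * vgpu_sreg() addresses the shadow copy kept alongside the virtual one;
 * which registers actually use it is decided by the MMIO handlers, not
 * by this header.
 */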

#define for_each_active_vgpu(gvt, vgpu, id) \
        idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
                for_each_if(vgpu->active)
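
/*
 * Usage sketch (illustrative only; emulate_vblank_for_vgpu() is a
 * hypothetical helper): walking every active vGPU, typically with
 * gvt->lock held by the caller:
 *
 *        struct intel_vgpu *vgpu;
 *        int id;
 *
 *        for_each_active_vgpu(gvt, vgpu, id)
 *                emulate_vblank_for_vgpu(vgpu);
 */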

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
                u32 offset, u32 val, bool low)
{
        u32 *pval;

        /* BAR offset should be 32-bit aligned */
        offset = rounddown(offset, 4);
        pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

        if (low) {
                /*
                 * Only update bits 31..4; leave bits 3..0 (the BAR
                 * type/flag bits) unchanged.
                 */
                *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
        } else {
                *pval = val;
        }
}

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                                         struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
        ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
         (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
        ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
         (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
        ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
          (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
        ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
         (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
        ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
         (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
        (gvt_gmadr_is_aperture(gvt, gmadr) || \
         gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
                             unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                             unsigned long *g_index);
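
/*
 * Usage sketch (illustrative only): command parsing and GTT emulation
 * first check a guest graphics memory address against the vGPU's ranges,
 * then translate it into a host address. The error codes here are
 * assumptions, not taken from the actual implementation:
 *
 *        u64 h_addr;
 *
 *        if (!vgpu_gmadr_is_valid(vgpu, g_addr))
 *                return -EINVAL;
 *        if (intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr))
 *                return -EFAULT;
 */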

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
                               bool primary);
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                                void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                                 void *p_data, unsigned int bytes);

void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

struct intel_gvt_ops {
        int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
                                unsigned int);
        int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
                                 unsigned int);
        int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
                                 unsigned int);
        int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
                                  unsigned int);
        struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
                                          struct intel_vgpu_type *);
        void (*vgpu_destroy)(struct intel_vgpu *);
        void (*vgpu_reset)(struct intel_vgpu *);
};
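
/*
 * Usage sketch (illustrative only): a hypervisor-specific MPT module is
 * handed this ops table and forwards trapped guest accesses through it,
 * e.g. from an MMIO trap handler:
 *
 *        static const struct intel_gvt_ops *gvt_ops;  // set at init time
 *
 *        ret = gvt_ops->emulate_mmio_read(vgpu, gpa, buf, count);
 *
 * The registration path and the concrete module (Xen- or KVM-based) are
 * outside this header.
 */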

#include "mpt.h"

#endif