/*
 * VFIO PCI Intel Graphics support
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Register a device specific region through which to provide read-only
 * access to the Intel IGD opregion. The register defining the opregion
 * address is also virtualized to prevent user modification.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_pci_private.h"

#define OPREGION_SIGNATURE	"IntelGraphicsMem"
#define OPREGION_SIZE		(8 * 1024)
#define OPREGION_PCI_ADDR	0xfc

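/*
 * read() handler for the OpRegion device specific region.  The OpRegion is
 * exposed through a read-only kernel mapping stored in region->data; writes
 * and accesses beyond the end of the region are rejected.
 */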
static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
			      size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	void *base = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	count = min(count, (size_t)(vdev->region[i].size - pos));

	if (copy_to_user(buf, base + pos, count))
		return -EFAULT;

	*ppos += count;

	return count;
}

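/* Unmap the kernel mapping of the OpRegion when the region is released. */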
static void vfio_pci_igd_release(struct vfio_pci_device *vdev,
				 struct vfio_pci_region *region)
{
	memunmap(region->data);
}

static const struct vfio_pci_regops vfio_pci_igd_regops = {
	.rw		= vfio_pci_igd_rw,
	.release	= vfio_pci_igd_release,
};

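/*
 * Find the OpRegion through the address register at config offset
 * OPREGION_PCI_ADDR, map it, and register it as a read-only device specific
 * region.  The address register is then virtualized so the user sees the
 * host value but cannot modify it.
 */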
static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
{
	__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
	u32 addr, size;
	void *base;
	int ret;

	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
	if (ret)
		return ret;

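	/* A value of zero or all ones means no OpRegion is present. */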
	if (!addr || !(~addr))
		return -ENODEV;

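	/* Map the OpRegion and check for the expected signature. */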
	base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

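	/* The OpRegion header reports its size at offset 16, in KB units. */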
	size = le32_to_cpu(*(__le32 *)(base + 16));
	if (!size) {
		memunmap(base);
		return -EINVAL;
	}

	size *= 1024; /* In KB */

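	/* If the reported size differs from the 8KB default, remap to cover it. */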
	if (size != OPREGION_SIZE) {
		memunmap(base);
		base = memremap(addr, size, MEMREMAP_WB);
		if (!base)
			return -ENOMEM;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
		&vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base);
	if (ret) {
		memunmap(base);
		return ret;
	}

	/* Fill vconfig with the hw value and virtualize register */
	*dwordp = cpu_to_le32(addr);
	memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
	       PCI_CAP_ID_INVALID_VIRT, 4);

	return ret;
}

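/*
 * read() handler for the host and LPC bridge config space regions.  Reads
 * are forwarded to the physical bridge, split into naturally aligned config
 * accesses; writes are rejected.
 */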
static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
				  char __user *buf, size_t count, loff_t *ppos,
				  bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct pci_dev *pdev = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t size;
	int ret;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	size = count = min(count, (size_t)(vdev->region[i].size - pos));

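	/* Leading byte, if needed, to reach word alignment. */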
	if ((pos & 1) && size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return pcibios_err_to_errno(ret);

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

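	/* Leading word, if needed and room remains, to reach dword alignment. */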
	if ((pos & 3) && size > 2) {
		u16 val;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return pcibios_err_to_errno(ret);

		val = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &val, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

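	/* Bulk of the transfer as naturally aligned dwords. */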
	while (size > 3) {
		u32 val;

		ret = pci_user_read_config_dword(pdev, pos, &val);
		if (ret)
			return pcibios_err_to_errno(ret);

		val = cpu_to_le32(val);
		if (copy_to_user(buf + count - size, &val, 4))
			return -EFAULT;

		pos += 4;
		size -= 4;
	}

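	/* Trailing word, if any. */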
	while (size >= 2) {
		u16 val;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return pcibios_err_to_errno(ret);

		val = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &val, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

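	/* Trailing byte, if any. */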
	while (size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return pcibios_err_to_errno(ret);

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	*ppos += count;

	return count;
}

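/* Drop the reference on the bridge device when the region is released. */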
static void vfio_pci_igd_cfg_release(struct vfio_pci_device *vdev,
				     struct vfio_pci_region *region)
{
	struct pci_dev *pdev = region->data;

	pci_dev_put(pdev);
}

static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
	.rw		= vfio_pci_igd_cfg_rw,
	.release	= vfio_pci_igd_cfg_release,
};

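/*
 * Register read-only device specific regions exposing the config space of
 * the host bridge (00:00.0) and the ISA/LPC bridge (00:1f.0), both of which
 * must be Intel devices of the expected class.
 */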
static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev)
{
	struct pci_dev *host_bridge, *lpc_bridge;
	int ret;

	host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!host_bridge)
		return -ENODEV;

	if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
		pci_dev_put(host_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
		&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, host_bridge);
	if (ret) {
		pci_dev_put(host_bridge);
		return ret;
	}

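	/* Now the ISA/LPC bridge at 00:1f.0. */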
	lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
	if (!lpc_bridge)
		return -ENODEV;

	if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
		pci_dev_put(lpc_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
		&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
	if (ret) {
		pci_dev_put(lpc_bridge);
		return ret;
	}

	return 0;
}

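/*
 * Entry point for IGD support: set up the OpRegion region first, then the
 * host and LPC bridge config space regions.
 */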
int vfio_pci_igd_init(struct vfio_pci_device *vdev)
{
	int ret;

	ret = vfio_pci_igd_opregion_init(vdev);
	if (ret)
		return ret;

	ret = vfio_pci_igd_cfg_init(vdev);
	if (ret)
		return ret;

	return 0;
}