/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		struct drm_vmw_synccpu_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

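/*
 * For example, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * expands to a designated initializer at index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE, so every
 * entry in the vmw_ioctls[] table below lands at the slot matching its
 * command number.
 */
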
/**
 * Ioctl definitions.
 */

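/*
 * Flag summary: DRM_AUTH requires the caller to be authenticated against
 * the current master, DRM_MASTER restricts the ioctl to the master
 * itself, DRM_RENDER_ALLOW additionally permits it on render nodes, and
 * DRM_UNLOCKED dispatches without taking the legacy global DRM lock.
 */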
static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
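/*
 * In the id list above, 0x15ad is the VMware PCI vendor id and 0x0405
 * is the SVGA II display adapter.
 */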

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
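/*
 * All of the above can be given at load time, e.g. (illustrative):
 *   modprobe vmwgfx enable_fbdev=1 restrict_iommu=1
 * and, with the 0600 mode used here, changed later by root through
 * /sys/module/vmwgfx/parameters/.
 */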

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_3)
		DRM_INFO("  Command Buffers 3.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

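	/*
	 * Pool sizes below are in bytes: 256*4096 = 1 MiB for the main
	 * command-buffer space and 2*4096 = 8 KiB for what is assumed to
	 * be the default-size buffer space.
	 */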
	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
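
/*
 * vmw_request_device - Bring up device resources: initialize the FIFO,
 * bring up fencing, create the command-buffer manager when the device
 * supports it, perform the late setup (otables and the large
 * command-buffer pool) and create the dummy query bo.
 */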
381
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000382static int vmw_request_device(struct vmw_private *dev_priv)
383{
384 int ret;
385
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000386 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
387 if (unlikely(ret != 0)) {
388 DRM_ERROR("Unable to initialize FIFO.\n");
389 return ret;
390 }
Thomas Hellstromae2a1042011-09-01 20:18:44 +0000391 vmw_fence_fifo_up(dev_priv->fman);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700392 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
393 if (IS_ERR(dev_priv->cman))
394 dev_priv->cman = NULL;
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700395
396 ret = vmw_request_device_late(dev_priv);
397 if (ret)
398 goto out_no_mob;
399
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200400 ret = vmw_dummy_query_bo_create(dev_priv);
401 if (unlikely(ret != 0))
402 goto out_no_query_bo;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000403
404 return 0;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200405
406out_no_query_bo:
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700407 if (dev_priv->cman)
408 vmw_cmdbuf_remove_pool(dev_priv->cman);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700409 if (dev_priv->has_mob) {
410 (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100411 vmw_otables_takedown(dev_priv);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700412 }
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700413 if (dev_priv->cman)
414 vmw_cmdbuf_man_destroy(dev_priv->cman);
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100415out_no_mob:
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200416 vmw_fence_fifo_down(dev_priv->fman);
417 vmw_fifo_release(dev_priv, &dev_priv->fifo);
418 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000419}
420
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
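
	/*
	 * Selection below, roughly in increasing precedence: physical
	 * addresses when neither DMA-API use nor coherency is forced,
	 * map_populate when an IOMMU is active, alloc_coherent when the
	 * DMA ops require CPU syncs or force_coherent is set, and
	 * map_bind when restrict_iommu asks for mappings to be given up
	 * early.
	 */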
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

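	/*
	 * Negotiate the device version: write the highest id the driver
	 * supports; the device is expected to echo it back if it supports
	 * that version, which is what the check below relies on.
	 */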
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;


	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
					       dev_priv->mmio_size);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
	return ret;
}
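
/*
 * vmw_driver_unload - Tear the device down, essentially in reverse order
 * of vmw_driver_load: fb and kms first, then the memory managers, early
 * and late device release, fence manager, irq, PCI regions, and finally
 * the device private struct itself.
 */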
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);
		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Taking the drm_global_mutex after the TTM lock might deadlock
	 */
	if (!(flags & DRM_UNLOCKED)) {
		DRM_ERROR("Refusing locked ioctl access.\n");
		return ERR_PTR(-EDEADLK);
	}

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

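/*
 * vmw_generic_ioctl - Common ioctl dispatch: validates driver-range
 * commands against vmw_ioctls[], enforces master/authentication rules
 * through vmw_master_check(), and holds the TTM read lock across the
 * actual call when one was taken.
 */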
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (unlikely(IS_ERR(vmaster))) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001222/**
1223 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1224 *
1225 * @dev_priv: Pointer to device private struct.
1226 * Needs the reservation sem to be held in non-exclusive mode.
1227 */
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07001228static void __vmw_svga_enable(struct vmw_private *dev_priv)
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001229{
1230 spin_lock(&dev_priv->svga_lock);
1231 if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1232 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
1233 dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
1234 }
1235 spin_unlock(&dev_priv->svga_lock);
1236}
1237
1238/**
1239 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1240 *
1241 * @dev_priv: Pointer to device private struct.
1242 */
1243void vmw_svga_enable(struct vmw_private *dev_priv)
1244{
1245 ttm_read_lock(&dev_priv->reservation_sem, false);
1246 __vmw_svga_enable(dev_priv);
1247 ttm_read_unlock(&dev_priv->reservation_sem);
1248}
1249
1250/**
1251 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
1252 *
1253 * @dev_priv: Pointer to device private struct.
1254 * Needs the reservation sem to be held in exclusive mode.
1255 * Will not empty VRAM. VRAM must be emptied by caller.
1256 */
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07001257static void __vmw_svga_disable(struct vmw_private *dev_priv)
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001258{
1259 spin_lock(&dev_priv->svga_lock);
1260 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1261 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1262 vmw_write(dev_priv, SVGA_REG_ENABLE,
1263 SVGA_REG_ENABLE_ENABLE_HIDE);
1264 }
1265 spin_unlock(&dev_priv->svga_lock);
1266}
1267
/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_ENABLE_HIDE);
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

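/**
 * vmw_remove - PCI remove callback. Disables the PCI device and
 * unregisters the drm device.
 *
 * @pdev: Pointer to the PCI device being removed.
 */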
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

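/**
 * vmwgfx_pm_notifier - PM notifier callback. Quiesces the device before
 * hibernation and brings it back up afterwards.
 *
 * @nb: Pointer to the notifier block embedded in struct vmw_private.
 * @val: The type of PM event.
 * @ptr: Unused.
 */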
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

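/**
 * vmw_pci_suspend - PCI suspend callback. Saves PCI state and puts the
 * device into D3hot. Refuses the transition while the driver flags
 * hibernation as unsafe (dev_priv->refuse_hibernation).
 *
 * @pdev: Pointer to the PCI device.
 * @state: The requested power transition. Unused.
 */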
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

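/**
 * vmw_pci_resume - PCI resume callback. Restores PCI state and
 * re-enables the device.
 *
 * @pdev: Pointer to the PCI device.
 */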
static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

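/**
 * vmw_pm_suspend - dev_pm_ops suspend callback. Thin wrapper that
 * forwards to vmw_pci_suspend() with a dummy pm_message_t.
 *
 * @kdev: Pointer to the struct device.
 */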
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

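/**
 * vmw_pm_resume - dev_pm_ops resume callback. Forwards to
 * vmw_pci_resume().
 *
 * @kdev: Pointer to the struct device.
 */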
static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

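/**
 * vmw_pm_freeze - dev_pm_ops freeze callback. Releases the FIFO and
 * disables SVGA mode ahead of hibernation. Fails with -EBUSY if 3D
 * resources are still active.
 *
 * @kdev: Pointer to the struct device.
 */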
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

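/**
 * vmw_pm_restore - dev_pm_ops thaw/restore callback. Re-initializes the
 * device after hibernation and re-enables SVGA mode if fbdev is used.
 *
 * @kdev: Pointer to the struct device.
 */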
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

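/*
 * dev_pm_ops callbacks: freeze/thaw/restore handle the hibernation
 * sequence, while suspend/resume handle suspend-to-RAM.
 */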
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	    DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

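/**
 * vmw_probe - PCI probe callback. Registers a drm device for the newly
 * bound PCI device.
 *
 * @pdev: Pointer to the PCI device.
 * @ent: The matching entry in vmw_pci_id_list.
 */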
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

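/**
 * vmwgfx_init - Module init. Registers the PCI driver with the drm core.
 */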
static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

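/**
 * vmwgfx_exit - Module exit. Unregisters the PCI driver.
 */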
static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");