/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

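/*
 * For illustration: VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * expands to a designated initializer that places the entry at index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE of the ioctl
 * table below, so each entry lands in the right slot regardless of the
 * order in which the table is written.
 */
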
/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

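/*
 * PCI match table: 0x15ad is the VMware PCI vendor ID and 0x0405 the
 * SVGA II display adapter.
 */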
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);


static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
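		/*
		 * Request a 256*4096-byte (1 MiB) command buffer pool below.
		 * On failure the command buffer manager is destroyed again;
		 * command submission then presumably falls back to the FIFO.
		 */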
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the given
 * vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
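	/*
	 * Note on the selection below: an enabled Intel IOMMU picks
	 * vmw_dma_map_populate; otherwise physical page addresses are used
	 * unless the DMA API or coherent pages are forced via the module
	 * parameters. out_fixup downgrades map_populate to map_bind when
	 * restrict_iommu is set, and force_coherent overrides everything.
	 */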
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

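	/*
	 * Negotiate the SVGA device version: write the highest ID the
	 * driver supports (SVGA_ID_2) and read back what the device
	 * accepted; anything else is refused.
	 */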
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

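		/*
		 * VMW_EXECBUF is listed with a NULL handler in vmw_ioctls[]
		 * and dispatched by hand below: the size encoded in cmd is
		 * forwarded to vmw_execbuf_ioctl(), presumably so that
		 * differently sized revisions of the argument struct can be
		 * accepted.
		 */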
		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;
}

1220static void vmw_master_drop(struct drm_device *dev,
1221 struct drm_file *file_priv,
1222 bool from_release)
1223{
1224 struct vmw_private *dev_priv = vmw_priv(dev);
1225 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1226 struct vmw_master *vmaster = vmw_master(file_priv->master);
1227 int ret;
1228
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001229 /*
1230 * Make sure the master doesn't disappear while we have
1231 * it locked.
1232 */
1233
1234 vmw_fp->locked_master = drm_master_get(file_priv->master);
1235 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
Thomas Hellstrom8fbf9d92015-11-26 19:45:16 +01001236 vmw_kms_legacy_hotspot_clear(dev_priv);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001237 if (unlikely(ret != 0)) {
1238 DRM_ERROR("Unable to lock TTM at VT switch.\n");
1239 drm_master_put(&vmw_fp->locked_master);
1240 }
1241
Thomas Hellstromc4249852013-10-09 01:42:51 -07001242 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001243
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001244 if (!dev_priv->enable_fb)
1245 vmw_svga_disable(dev_priv);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +02001246
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001247 dev_priv->active_master = &dev_priv->fbdev_master;
1248 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
1249 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
1250
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +02001251 if (dev_priv->enable_fb)
1252 vmw_fb_on(dev_priv);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001253}
1254
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001255/**
1256 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1257 *
1258 * @dev_priv: Pointer to device private struct.
1259 * Needs the reservation sem to be held in non-exclusive mode.
1260 */
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07001261static void __vmw_svga_enable(struct vmw_private *dev_priv)
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001262{
1263 spin_lock(&dev_priv->svga_lock);
1264 if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1265 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
1266 dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
1267 }
1268 spin_unlock(&dev_priv->svga_lock);
1269}
1270
1271/**
1272 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1273 *
1274 * @dev_priv: Pointer to device private struct.
1275 */
1276void vmw_svga_enable(struct vmw_private *dev_priv)
1277{
1278 ttm_read_lock(&dev_priv->reservation_sem, false);
1279 __vmw_svga_enable(dev_priv);
1280 ttm_read_unlock(&dev_priv->reservation_sem);
1281}
1282
1283/**
1284 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
1285 *
1286 * @dev_priv: Pointer to device private struct.
1287 * Needs the reservation sem to be held in exclusive mode.
1288 * Will not empty VRAM. VRAM must be emptied by caller.
1289 */
Thomas Hellstromb9eb1a62015-04-02 02:39:45 -07001290static void __vmw_svga_disable(struct vmw_private *dev_priv)
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001291{
1292 spin_lock(&dev_priv->svga_lock);
1293 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1294 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1295 vmw_write(dev_priv, SVGA_REG_ENABLE,
Sinclair Yeh8ce75f82015-07-08 21:20:39 -07001296 SVGA_REG_ENABLE_HIDE |
1297 SVGA_REG_ENABLE_ENABLE);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001298 }
1299 spin_unlock(&dev_priv->svga_lock);
1300}
1301
1302/**
1303 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the FIFO
1304 * running.
1305 *
1306 * @dev_priv: Pointer to device private struct.
1307 * Will empty VRAM.
1308 */
1309void vmw_svga_disable(struct vmw_private *dev_priv)
1310{
1311 ttm_write_lock(&dev_priv->reservation_sem, false);
1312 spin_lock(&dev_priv->svga_lock);
1313 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1314 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
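		/*
		 * The spinlock must be dropped before evicting:
		 * ttm_bo_evict_mm() may sleep, and new VRAM placements
		 * are already refused now that use_type has been cleared.
		 */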
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001315 spin_unlock(&dev_priv->svga_lock);
1316 if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
1317 DRM_ERROR("Failed evicting VRAM buffers.\n");
Sinclair Yeh8ce75f82015-07-08 21:20:39 -07001318 vmw_write(dev_priv, SVGA_REG_ENABLE,
1319 SVGA_REG_ENABLE_HIDE |
1320 SVGA_REG_ENABLE_ENABLE);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001321 } else
1322 spin_unlock(&dev_priv->svga_lock);
1323 ttm_write_unlock(&dev_priv->reservation_sem);
1324}
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001325
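/*
 * Note the locking asymmetry between the two paths above:
 * vmw_svga_enable() only needs the reservation sem shared, while
 * vmw_svga_disable() takes it exclusively since it may have to evict
 * every buffer out of VRAM before hiding the device.
 */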
1326static void vmw_remove(struct pci_dev *pdev)
1327{
1328 struct drm_device *dev = pci_get_drvdata(pdev);
1329
Thomas Hellstromfd3e4d62015-03-10 11:07:40 -07001330 pci_disable_device(pdev);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001331 drm_put_dev(dev);
1332}
1333
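/*
 * vmwgfx_pm_notifier - react to hibernation state changes. Ahead of
 * hibernation the framebuffer is switched off, the reservation sem is
 * suspend-locked and all device resources are evicted to swappable
 * memory; on PM_POST_HIBERNATION / PM_POST_RESTORE the inverse is
 * applied.
 */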
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001334static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1335 void *ptr)
1336{
1337 struct vmw_private *dev_priv =
1338 container_of(nb, struct vmw_private, pm_nb);
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001339
1340 switch (val) {
1341 case PM_HIBERNATION_PREPARE:
Thomas Hellstroma2787242015-06-29 12:55:07 -07001342 if (dev_priv->enable_fb)
1343 vmw_fb_off(dev_priv);
Thomas Hellstrom294adf72014-02-27 12:34:51 +01001344 ttm_suspend_lock(&dev_priv->reservation_sem);
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001345
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001346 /*
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001347 * This empties VRAM and unbinds all GMR bindings.
1348 * Buffer contents are moved to swappable memory.
1349 */
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001350 vmw_execbuf_release_pinned_bo(dev_priv);
1351 vmw_resource_evict_all(dev_priv);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001352 vmw_release_device_early(dev_priv);
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001353 ttm_bo_swapout_all(&dev_priv->bdev);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001354 vmw_fence_fifo_down(dev_priv->fman);
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001355 break;
1356 case PM_POST_HIBERNATION:
Thomas Hellstrom094e0fa2010-10-05 12:43:00 +02001357 case PM_POST_RESTORE:
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001358 vmw_fence_fifo_up(dev_priv->fman);
Thomas Hellstrom294adf72014-02-27 12:34:51 +01001359 ttm_suspend_unlock(&dev_priv->reservation_sem);
Thomas Hellstroma2787242015-06-29 12:55:07 -07001360 if (dev_priv->enable_fb)
1361 vmw_fb_on(dev_priv);
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001362 break;
1363 case PM_RESTORE_PREPARE:
1364 break;
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001365 default:
1366 break;
1367 }
1368 return 0;
1369}
1370
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001371static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001372{
Thomas Hellstrom094e0fa2010-10-05 12:43:00 +02001373 struct drm_device *dev = pci_get_drvdata(pdev);
1374 struct vmw_private *dev_priv = vmw_priv(dev);
1375
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001376 if (dev_priv->refuse_hibernation)
Thomas Hellstrom094e0fa2010-10-05 12:43:00 +02001377 return -EBUSY;
Thomas Hellstrom094e0fa2010-10-05 12:43:00 +02001378
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001379 pci_save_state(pdev);
1380 pci_disable_device(pdev);
1381 pci_set_power_state(pdev, PCI_D3hot);
1382 return 0;
1383}
1384
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001385static int vmw_pci_resume(struct pci_dev *pdev)
Thomas Hellstromd9f36a02010-01-13 22:28:43 +01001386{
1387 pci_set_power_state(pdev, PCI_D0);
1388 pci_restore_state(pdev);
1389 return pci_enable_device(pdev);
1390}
1391
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001392static int vmw_pm_suspend(struct device *kdev)
1393{
1394 struct pci_dev *pdev = to_pci_dev(kdev);
1395 struct pm_message dummy;
1396
1397 dummy.event = 0;
1398
1399 return vmw_pci_suspend(pdev, dummy);
1400}
1401
1402static int vmw_pm_resume(struct device *kdev)
1403{
1404 struct pci_dev *pdev = to_pci_dev(kdev);
1405
1406 return vmw_pci_resume(pdev);
1407}
1408
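/*
 * vmw_pm_freeze - hibernation entry point. Freezing is refused with
 * -EBUSY while anything beyond the fbdev framebuffer still holds FIFO
 * resources, presumably because device-side 3D state could not be
 * restored after power-down.
 */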
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001409static int vmw_pm_freeze(struct device *kdev)
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001410{
1411 struct pci_dev *pdev = to_pci_dev(kdev);
1412 struct drm_device *dev = pci_get_drvdata(pdev);
1413 struct vmw_private *dev_priv = vmw_priv(dev);
1414
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001415 dev_priv->suspended = true;
1416 if (dev_priv->enable_fb)
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001417 vmw_fifo_resource_dec(dev_priv);
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001418
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001419 if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
1420 DRM_ERROR("Can't hibernate while 3D resources are active.\n");
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001421 if (dev_priv->enable_fb)
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001422 vmw_fifo_resource_inc(dev_priv);
1423 WARN_ON(vmw_request_device_late(dev_priv));
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001424 dev_priv->suspended = false;
1425 return -EBUSY;
1426 }
1427
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001428 if (dev_priv->enable_fb)
1429 __vmw_svga_disable(dev_priv);
1430
1431 vmw_release_device_late(dev_priv);
1432
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001433 return 0;
1434}
1435
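/*
 * vmw_pm_restore - counterpart of vmw_pm_freeze, also reused for .thaw.
 * The SVGA_REG_ID handshake is redone first, since the device cannot be
 * assumed to retain register state across hibernation.
 */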
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001436static int vmw_pm_restore(struct device *kdev)
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001437{
1438 struct pci_dev *pdev = to_pci_dev(kdev);
1439 struct drm_device *dev = pci_get_drvdata(pdev);
1440 struct vmw_private *dev_priv = vmw_priv(dev);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001441 int ret;
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001442
Thomas Hellstrom95e8f6a2012-11-09 10:05:57 +01001443 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1444 (void) vmw_read(dev_priv, SVGA_REG_ID);
Thomas Hellstrom95e8f6a2012-11-09 10:05:57 +01001445
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001446 if (dev_priv->enable_fb)
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001447 vmw_fifo_resource_inc(dev_priv);
1448
1449 ret = vmw_request_device(dev_priv);
1450 if (ret)
1451 return ret;
1452
1453 if (dev_priv->enable_fb)
1454 __vmw_svga_enable(dev_priv);
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001455
1456 dev_priv->suspended = false;
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001457
1458 return 0;
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001459}
1460
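/*
 * freeze/thaw/restore cover the hibernation path, suspend/resume the
 * suspend-to-RAM path; .thaw simply reuses vmw_pm_restore.
 */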
1461static const struct dev_pm_ops vmw_pm_ops = {
Thomas Hellstrom153b3d52015-06-25 10:47:43 -07001462 .freeze = vmw_pm_freeze,
1463 .thaw = vmw_pm_restore,
1464 .restore = vmw_pm_restore,
Thomas Hellstrom7fbd7212010-10-05 12:43:01 +02001465 .suspend = vmw_pm_suspend,
1466 .resume = vmw_pm_resume,
1467};
1468
Arjan van de Vene08e96d2011-10-31 07:28:57 -07001469static const struct file_operations vmwgfx_driver_fops = {
1470 .owner = THIS_MODULE,
1471 .open = drm_open,
1472 .release = drm_release,
1473 .unlocked_ioctl = vmw_unlocked_ioctl,
1474 .mmap = vmw_mmap,
1475 .poll = vmw_fops_poll,
1476 .read = vmw_fops_read,
Arjan van de Vene08e96d2011-10-31 07:28:57 -07001477#if defined(CONFIG_COMPAT)
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001478 .compat_ioctl = vmw_compat_ioctl,
Arjan van de Vene08e96d2011-10-31 07:28:57 -07001479#endif
1480 .llseek = noop_llseek,
1481};
1482
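/*
 * DRIVER_RENDER exposes a render node, letting unprivileged clients use
 * the acceleration ioctls without becoming DRM master, and DRIVER_PRIME
 * enables the dma-buf import/export hooks declared further down.
 */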
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001483static struct drm_driver driver = {
1484 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
Thomas Hellstrom03f80262014-03-20 13:06:34 +01001485 DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001486 .load = vmw_driver_load,
1487 .unload = vmw_driver_unload,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001488 .lastclose = vmw_lastclose,
1489 .irq_preinstall = vmw_irq_preinstall,
1490 .irq_postinstall = vmw_irq_postinstall,
1491 .irq_uninstall = vmw_irq_uninstall,
1492 .irq_handler = vmw_irq_handler,
Thomas Hellstrom7a1c2f62010-10-01 10:21:49 +02001493 .get_vblank_counter = vmw_get_vblank_counter,
Jakob Bornecrantz1c482ab2011-10-17 11:59:45 +02001494 .enable_vblank = vmw_enable_vblank,
1495 .disable_vblank = vmw_disable_vblank,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001496 .ioctls = vmw_ioctls,
Damien Lespiauf95aeb12014-06-09 14:39:49 +01001497 .num_ioctls = ARRAY_SIZE(vmw_ioctls),
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001498 .master_create = vmw_master_create,
1499 .master_destroy = vmw_master_destroy,
1500 .master_set = vmw_master_set,
1501 .master_drop = vmw_master_drop,
1502 .open = vmw_driver_open,
Thomas Hellstrom6b82ef52012-02-09 16:56:42 +01001503 .preclose = vmw_preclose,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001504 .postclose = vmw_postclose,
David Herrmann915b4d12014-08-29 12:12:43 +02001505 .set_busid = drm_pci_set_busid,
Dave Airlie5e1782d2012-08-28 01:53:54 +00001506
1507 .dumb_create = vmw_dumb_create,
1508 .dumb_map_offset = vmw_dumb_map_offset,
1509 .dumb_destroy = vmw_dumb_destroy,
1510
Thomas Hellstrom69977ff2013-11-13 01:50:46 -08001511 .prime_fd_to_handle = vmw_prime_fd_to_handle,
1512 .prime_handle_to_fd = vmw_prime_handle_to_fd,
1513
Arjan van de Vene08e96d2011-10-31 07:28:57 -07001514 .fops = &vmwgfx_driver_fops,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001515 .name = VMWGFX_DRIVER_NAME,
1516 .desc = VMWGFX_DRIVER_DESC,
1517 .date = VMWGFX_DRIVER_DATE,
1518 .major = VMWGFX_DRIVER_MAJOR,
1519 .minor = VMWGFX_DRIVER_MINOR,
1520 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
1521};
1522
Dave Airlie8410ea32010-12-15 03:16:38 +10001523static struct pci_driver vmw_pci_driver = {
1524 .name = VMWGFX_DRIVER_NAME,
1525 .id_table = vmw_pci_id_list,
1526 .probe = vmw_probe,
1527 .remove = vmw_remove,
1528 .driver = {
1529 .pm = &vmw_pm_ops
1530 }
1531};
1532
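/*
 * PCI probe simply defers to the DRM core, which will in turn invoke
 * vmw_driver_load() through the .load hook registered above.
 */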
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001533static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1534{
Jordan Crousedcdb1672010-05-27 13:40:25 -06001535 return drm_get_pci_dev(pdev, ent, &driver);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001536}
1537
1538static int __init vmwgfx_init(void)
1539{
1540 int ret;
Dave Airlie8410ea32010-12-15 03:16:38 +10001541 ret = drm_pci_init(&driver, &vmw_pci_driver);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001542 if (ret)
1543 DRM_ERROR("Failed initializing DRM.\n");
1544 return ret;
1545}
1546
1547static void __exit vmwgfx_exit(void)
1548{
Dave Airlie8410ea32010-12-15 03:16:38 +10001549 drm_pci_exit(&driver, &vmw_pci_driver);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001550}
1551
1552module_init(vmwgfx_init);
1553module_exit(vmwgfx_exit);
1554
1555MODULE_AUTHOR("VMware Inc. and others");
1556MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
1557MODULE_LICENSE("GPL and additional rights");
Thomas Hellstrom73558ea2010-10-05 12:43:07 +02001558MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
1559 __stringify(VMWGFX_DRIVER_MINOR) "."
1560 __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
1561 "0");