/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		struct drm_vmw_present_readback_arg)
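
/*
 * Note: DRM_IOW/DRM_IOR/DRM_IOWR encode the transfer direction and the
 * size of the argument struct into the command number, so each macro
 * above must name exactly the argument type its handler expects.
 * vmw_unlocked_ioctl() below relies on this to reject commands whose
 * encoding does not match the table.
 */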

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
		      vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

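/*
 * vmw_print_capabilities - Report each set device capability bit via
 * DRM_INFO.
 *
 * @capabilities: The value read from SVGA_REG_CAPABILITIES.
 */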
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
}

/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally, it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

	ttm_bo_reserve(bo, false, false, false, 0);
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bdev->fence_lock);
	if (unlikely(ret != 0))
		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
					 10*HZ);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	} else
		DRM_ERROR("Dummy query buffer map failed.\n");
	ttm_bo_unreserve(bo);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	return ttm_bo_create(&dev_priv->bdev,
			     PAGE_SIZE,
			     ttm_bo_type_device,
			     &vmw_vram_sys_placement,
			     0, 0, false, NULL,
			     &dev_priv->dummy_query_bo);
}

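/*
 * vmw_request_device - Bring the device up: initialize the fifo, enable
 * fence processing and create and prepare the dummy query bo. The
 * inverse is vmw_release_device().
 */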
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;
	vmw_dummy_query_bo_prepare(dev_priv);

	return 0;

out_no_query_bo:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

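/*
 * vmw_release_device - Release the resources taken by
 * vmw_request_device(). Expects the pinned query bo to have been
 * released by earlier teardown.
 */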
static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_3d_resource_inc - Increase the 3d resource refcount.
 *
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * vmw_3d_resource_dec - Decrease the 3d resource refcount.
 *
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

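	/* Single-threaded at this point: set up locks, idrs and wait queues. */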
	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->surface_lru);
	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

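	/*
	 * Negotiate the SVGA device version: write the highest id the
	 * driver supports and verify that the device accepts it.
	 */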
	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears vesafb is loaded. "
			 "Ignore the above error, if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	/* Need to start the fifo to check if we can do screen objects */
	ret = vmw_3d_resource_inc(dev_priv, true);
	if (unlikely(ret != 0))
		goto out_no_fifo;
	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	/* 3D Depends on Screen Objects being used. */
	DRM_INFO("Detected %sdevice 3D availability.\n",
		 vmw_fifo_have_3d(dev_priv) ?
		 "" : "no ");

	/* We might be done with the fifo now */
	if (dev_priv->enable_fb) {
		vmw_fb_init(dev_priv);
	} else {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

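	/* Error unwind: release everything acquired above, in reverse order. */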
out_no_irq:
	if (dev_priv->enable_fb)
		vmw_fb_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	/* We still have a 3D resource reference held */
	if (dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
out_no_fifo:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

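/*
 * vmw_driver_open - Per-file open: allocate the vmw_fpriv and its TTM
 * object file, and set up the TTM device address space mapping on first
 * open.
 */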
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

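/*
 * vmw_unlocked_ioctl - Entry point for all vmwgfx ioctls. For driver
 * private commands, verify that the caller's fully encoded cmd (which
 * includes direction and argument size) matches the vmw_ioctls[] entry
 * before handing off to drm_ioctl().
 */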
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

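/*
 * vmw_master_set - Called when a new master takes over. If fbdev is
 * disabled, switch the device to svga mode and stop traces; then hand
 * the TTM lock over from the previously active master to the new one.
 */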
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}
	return ret;
}

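/*
 * vmw_master_drop - Called when the current master drops, for example on
 * a vt switch. Lock out further command submission, release the pinned
 * bo, optionally switch back to vga mode, and fall back to the fbdev
 * master.
 */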
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

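/*
 * vmwgfx_pm_notifier - PM notifier callback. Before suspend or
 * hibernation, take the TTM suspend lock, release the pinned bo and swap
 * out all buffer objects; unlock again on resume.
 */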
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/**
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = vmw_unlocked_ioctl,
		 .mmap = vmw_mmap,
		 .poll = vmw_fops_poll,
		 .read = vmw_fops_read,
		 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		 .compat_ioctl = drm_compat_ioctl,
#endif
		 .llseek = noop_llseek,
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");