/**************************************************************************
 *
 * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

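/*
 * Illustrative expansion (derived from the macro above, not generated
 * output): VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * becomes the designated initializer
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *           {DRM_IOCTL_VMW_GET_PARAM, flags, vmw_getparam_ioctl}
 *
 * so each entry of vmw_ioctls[] below lands at the array slot matching
 * its command number relative to DRM_COMMAND_BASE.
 */
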
/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark them as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
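
/*
 * PCI IDs above: 0x15ad is VMware's PCI vendor ID and 0x0405 is the
 * virtual SVGA II display adapter; any subsystem vendor/device IDs are
 * accepted (PCI_ANY_ID).
 */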

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
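
/*
 * Usage example (illustrative, not from the source): the parameters
 * above can be given at load time, e.g.
 *
 *   modprobe vmwgfx enable_fbdev=1 force_dma_api=1
 *
 * and those registered with write permission (S_IWUSR / 0600) can also
 * be changed later through /sys/module/vmwgfx/parameters/.
 */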


static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
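		/*
		 * Reading of the constants below (an interpretation, not
		 * stated in this file): 256*4096 bytes = 1 MiB of main
		 * command buffer pool space, and 2*4096 bytes = 8 KiB as
		 * the default command buffer size.
		 */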
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
403
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000404static int vmw_request_device(struct vmw_private *dev_priv)
405{
406 int ret;
407
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000408 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
409 if (unlikely(ret != 0)) {
410 DRM_ERROR("Unable to initialize FIFO.\n");
411 return ret;
412 }
Thomas Hellstromae2a1042011-09-01 20:18:44 +0000413 vmw_fence_fifo_up(dev_priv->fman);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700414 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700415 if (IS_ERR(dev_priv->cman)) {
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700416 dev_priv->cman = NULL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -0700417 dev_priv->has_dx = false;
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100418 }
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700419
420 ret = vmw_request_device_late(dev_priv);
421 if (ret)
422 goto out_no_mob;
423
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200424 ret = vmw_dummy_query_bo_create(dev_priv);
425 if (unlikely(ret != 0))
426 goto out_no_query_bo;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000427
428 return 0;
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200429
430out_no_query_bo:
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700431 if (dev_priv->cman)
432 vmw_cmdbuf_remove_pool(dev_priv->cman);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700433 if (dev_priv->has_mob) {
434 (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100435 vmw_otables_takedown(dev_priv);
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700436 }
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700437 if (dev_priv->cman)
438 vmw_cmdbuf_man_destroy(dev_priv->cman);
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100439out_no_mob:
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200440 vmw_fence_fifo_down(dev_priv->fman);
441 vmw_fifo_release(dev_priv, &dev_priv->fifo);
442 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000443}
444
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700445/**
446 * vmw_release_device_early - Early part of fifo takedown.
447 *
448 * @dev_priv: Pointer to device private struct.
449 *
450 * This is the first part of command submission takedown, to be called before
451 * buffer management is taken down.
452 */
453static void vmw_release_device_early(struct vmw_private *dev_priv)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000454{
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +0200455 /*
456 * Previous destructions should've released
457 * the pinned bo.
458 */
459
460 BUG_ON(dev_priv->pinned_bo != NULL);
461
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -0700462 vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700463 if (dev_priv->cman)
464 vmw_cmdbuf_remove_pool(dev_priv->cman);
465
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700466 if (dev_priv->has_mob) {
467 ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
Thomas Hellstrom3530bdc2012-11-21 10:49:52 +0100468 vmw_otables_takedown(dev_priv);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200469 }
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200470}
471
Thomas Hellstrom05730b32011-08-31 07:42:52 +0000472/**
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700473 * vmw_release_device_late - Late part of fifo takedown.
474 *
475 * @dev_priv: Pointer to device private struct.
476 *
477 * This is the last part of the command submission takedown, to be called when
478 * command submission is no longer needed. It may wait on pending fences.
Thomas Hellstrom05730b32011-08-31 07:42:52 +0000479 */
Thomas Hellstrom153b3d52015-06-25 10:47:43 -0700480static void vmw_release_device_late(struct vmw_private *dev_priv)
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200481{
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000482 vmw_fence_fifo_down(dev_priv->fman);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700483 if (dev_priv->cman)
484 vmw_cmdbuf_man_destroy(dev_priv->cman);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200485
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000486 vmw_fifo_release(dev_priv, &dev_priv->fifo);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +0200487}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values against the fb_max_[width|height] fields and
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, they are set to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
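
/*
 * Worked example (illustrative values): if the host reports 0x0 in
 * SVGA_REG_WIDTH/SVGA_REG_HEIGHT, the max_t() clamps above raise the
 * size to 800x600; a bogus report larger than fb_max_[width|height]
 * likewise falls back to 800x600.
 */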

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
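
/*
 * The resulting policy, restated (x86 case, a summary of the logic
 * above rather than new behavior): an active Intel IOMMU selects
 * map_populate, downgraded to map_bind when restrict_iommu is set;
 * with neither force_dma_api nor force_coherent the driver uses raw
 * physical page addresses; otherwise map_populate, promoted to
 * alloc_coherent when the dma_ops provide sync_single_for_cpu and
 * SWIOTLB has a table; force_coherent always wins.
 */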

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif
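
/*
 * Example: with the Intel IOMMU enabled, a 32-bit kernel or one booted
 * with vmwgfx.restrict_dma_mask=1 caps DMA addresses at
 * DMA_BIT_MASK(44), i.e. a 2^44-byte (16 TiB) addressable range,
 * matching the 44-bit restriction named in the module parameter
 * description above.
 */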

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

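		/*
		 * Worked example (illustrative numbers): a suggested
		 * 1048576 KiB (1 GiB) of guest-backed object memory with
		 * 4 KiB pages yields 1048576 * 1024 / 4096 = 262144
		 * max_mob_pages below.
		 */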
		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (drm_is_current_master(file_priv)) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
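			/*
			 * This is the permission override referred to in the
			 * comment above the VMW_UPDATE_LAYOUT entry of
			 * vmw_ioctls[]: require DRM_MASTER or CAP_SYS_ADMIN.
			 */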
1132 if (!drm_is_current_master(file_priv) &&
1133 !capable(CAP_SYS_ADMIN))
1134 return -EACCES;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001135 }
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001136
1137 if (unlikely(ioctl->cmd != cmd))
1138 goto out_io_encoding;
1139
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001140 flags = ioctl->flags;
1141 } else if (!drm_ioctl_flags(nr, &flags))
1142 return -EINVAL;
1143
1144 vmaster = vmw_master_check(dev, file_priv, flags);
Viresh Kumar55579cf2015-07-31 14:08:24 +05301145 if (IS_ERR(vmaster)) {
Thomas Hellstrome338c4c2014-11-25 08:20:05 +01001146 ret = PTR_ERR(vmaster);
1147
1148 if (ret != -ERESTARTSYS)
1149 DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
1150 nr, ret);
1151 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001152 }
1153
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001154 ret = ioctl_func(filp, cmd, arg);
1155 if (vmaster)
1156 ttm_read_unlock(&vmaster->lock);
1157
1158 return ret;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001159
1160out_io_encoding:
1161 DRM_ERROR("Invalid command format, ioctl %d\n",
1162 nr - DRM_COMMAND_BASE);
1163
1164 return -EINVAL;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001165}
1166
Thomas Hellstrom64190bd2014-02-27 12:56:08 +01001167static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
1168 unsigned long arg)
1169{
1170 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
1171}
1172
1173#ifdef CONFIG_COMPAT
1174static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
1175 unsigned long arg)
1176{
1177 return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
1178}
1179#endif
1180
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001181static void vmw_lastclose(struct drm_device *dev)
1182{
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001183}
1184
1185static void vmw_master_init(struct vmw_master *vmaster)
1186{
1187 ttm_lock_init(&vmaster->lock);
1188}
1189
static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

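/* Counterpart of vmw_master_create(): detach and free the private data. */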
static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

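/*
 * Called when a client becomes DRM master, either at open time or at
 * a VT switch. Any previously active master must be the fbdev master;
 * its lock is put back into kill mode before the new master is
 * installed, and a sysfs hotplug event announces the change.
 */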
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

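/*
 * Called when the current master drops the device, typically on a VT
 * switch: grab a reference and the lock so the master cannot go away
 * mid-switch, clear legacy cursor hotspots, and hand the device back
 * to the fbdev master.
 */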
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keeps the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else {
		spin_unlock(&dev_priv->svga_lock);
	}
	ttm_write_unlock(&dev_priv->reservation_sem);
}

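/*
 * PCI remove callback; releasing the drm device unwinds the driver
 * through the drm_driver .unload hook (vmw_driver_unload).
 */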
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

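/*
 * PM notifier. On PM_HIBERNATION_PREPARE all VRAM contents and GMR
 * bindings are evicted to swappable memory and the fifo is brought
 * down; the PM_POST_* events undo this once the image is written or
 * restored.
 */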
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

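/*
 * Legacy PCI suspend entry point: refuse to suspend while the device
 * has flagged that hibernation is unsafe, otherwise save PCI state
 * and enter D3hot.
 */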
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

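/* Inverse of vmw_pci_suspend(): back to D0, restore state, re-enable. */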
static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

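/*
 * dev_pm_ops wrappers around the legacy PCI suspend/resume pair; the
 * pm_message_t argument is unused by vmw_pci_suspend(), so a zeroed
 * dummy is passed.
 */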
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

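/*
 * Hibernation freeze handler. Freezing is refused with -EBUSY while
 * fifo resources are still active; in that case the device is brought
 * back up before returning. Otherwise SVGA mode is turned off (when
 * the driver owns the fbdev) and the device released.
 */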
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

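/*
 * Thaw/restore handler. The SVGA ID register is re-negotiated first,
 * since the device register state does not survive hibernation, and
 * the device is then brought back up, re-enabling SVGA mode when the
 * driver owns the fbdev.
 */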
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

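/*
 * Hibernation is handled by freeze/thaw/restore, with one handler
 * shared between thaw and restore; suspend/resume cover the ordinary
 * suspend-to-RAM path via the legacy PCI callbacks.
 */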
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

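/*
 * File operations. All ioctls, including compat ioctls, route through
 * vmw_generic_ioctl() above.
 */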
static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

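/*
 * DRM driver description: a modesetting driver with PRIME and
 * render-node support; master handover to and from the fbdev master
 * is handled by the vmw_master_* hooks above.
 */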
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

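/*
 * Refuse to load when the VGA text console is forced (e.g. booting
 * with nomodeset), since a modesetting driver cannot take over the
 * display in that configuration.
 */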
static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");