/**************************************************************************
 *
 * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		 struct drm_vmw_context_arg)

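/*
 * Illustrative userspace usage (editorial addition, not from the original
 * file): these fully encoded numbers are what a client passes to ioctl(2)
 * on an opened vmwgfx device node, e.g.
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	int ret = ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg);
 *
 * DRM_VMW_PARAM_3D is assumed here from the vmwgfx uapi header
 * (include/uapi/drm/vmwgfx_drm.h).
 */
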
/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

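/*
 * Expansion sketch (editorial addition): with the macro above, an entry
 * such as
 *
 *	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH)
 *
 * becomes a designated initializer at index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE, so the
 * vmw_ioctls[] table below can be indexed directly by driver ioctl number.
 */
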
/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);


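/*
 * Usage sketch (editorial addition, not from the original file): the
 * parameters above are ordinary module parameters, so they can be set at
 * load time, e.g.
 *
 *	modprobe vmwgfx force_coherent=1 restrict_dma_mask=1
 *
 * or via vmwgfx.force_coherent=1 on the kernel command line when the
 * driver is built in. Note that the force_dma_api parameter intentionally
 * backs the vmw_force_iommu variable.
 */
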
static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_ne_placement, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

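/*
 * Editorial note (not in the original file): vmw_request_device() and
 * vmw_request_device_late() are mirrored by vmw_release_device_early()
 * and vmw_release_device_late() below. The early/late split lets buffer
 * management be torn down in between, and lets the hibernation error
 * path re-run only the late part, as described in the kernel-doc above.
 */
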
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called
 * when command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to the fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, they are set to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

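/*
 * Worked example (editorial addition): with VMW_MIN_INITIAL_WIDTH/HEIGHT
 * of 800x600, a host reporting 640x480 is clamped up to 800x600; a bogus
 * reading such as 0xffffffff would normally exceed fb_max_[width|height]
 * and also fall back to 800x600.
 */
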
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

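/*
 * Decision summary (editorial addition, derived from the code above):
 * on x86 the effective precedence is roughly
 *
 *	force_coherent set	-> vmw_dma_alloc_coherent (always wins)
 *	restrict_iommu set	-> vmw_dma_map_populate demoted to
 *				   vmw_dma_map_bind
 *	intel_iommu_enabled	-> start from vmw_dma_map_populate
 *	neither force flag set	-> vmw_dma_phys
 *
 * Non-x86 builds unconditionally use vmw_dma_map_populate.
 */
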
/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->requested_layout_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}


	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
	DRM_INFO("Atomic: %s\n",
		 (dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (drm_is_current_master(file_priv)) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

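/*
 * vmw_lastclose - Currently a no-op; master transitions are handled by
 * the master_set() / master_drop() hooks below.
 */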
static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

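/**
 * vmw_master_create - Allocate and set up driver-private master state.
 *
 * @dev: Pointer to the drm device.
 * @master: The new drm master to attach the state to.
 *
 * The new master's TTM lock starts out in the kill state, terminating
 * waiters with SIGTERM, until vmw_master_set() clears it.
 */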
static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(!vmaster))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

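/**
 * vmw_master_destroy - Free driver-private master state.
 *
 * @dev: Pointer to the drm device.
 * @master: The drm master whose driver-private state is being freed.
 */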
static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

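/**
 * vmw_master_set - DRM core callback invoked when a client becomes master.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The client file becoming master.
 * @from_open: True if called as part of a file open.
 *
 * Takes device control away from the fbdev master if that is currently
 * active, and, when the new master previously dropped master privileges
 * (rather than opening afresh), unlocks its TTM lock and drops the
 * reference held on the locked master.
 */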
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

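/**
 * vmw_master_drop - DRM core callback invoked when a master drops control.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The client file dropping master privileges.
 *
 * Locks the dropping master's TTM lock, keeping a reference so the
 * master can't disappear while locked, clears legacy cursor hotspots,
 * disables SVGA when there is no fbdev emulation keeping it alive, and
 * reinstates the fbdev master as the active one.
 */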
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */
	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

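/**
 * vmwgfx_pm_notifier - PM notifier callback.
 *
 * @nb: The notifier block this callback is registered with.
 * @val: The PM event.
 * @ptr: Unused.
 *
 * Grabs the reservation sem in suspend (write) mode at
 * PM_HIBERNATION_PREPARE, making sure no buffer-object reservations are
 * held while user space is being frozen, and releases it again at
 * PM_POST_HIBERNATION / PM_POST_RESTORE if it is still held.
 */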
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

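/**
 * vmw_pci_suspend - Legacy PCI suspend callback.
 *
 * @pdev: The PCI device.
 * @state: The requested power transition (unused).
 *
 * Refuses suspend with -EBUSY while the driver has hibernation flagged
 * as unsafe (refuse_hibernation); otherwise saves PCI state and drops
 * the device into D3hot.
 */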
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

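/*
 * The dev_pm_ops suspend / resume entry points simply forward to the
 * legacy PCI callbacks above; vmw_pci_suspend() ignores its pm_message_t
 * argument, so a dummy message with event 0 is passed.
 */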
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

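/**
 * vmw_pm_freeze - Hibernation freeze callback.
 *
 * @kdev: The device being frozen.
 *
 * Suspends modesetting and fbdev, evicts all resources and buffer
 * objects and releases the device early. If 3D (fifo) resources are
 * still active at that point, the freeze is rolled back and -EBUSY
 * returned. Relies on the reservation sem already being held in
 * suspend mode by vmwgfx_pm_notifier().
 */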
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	ttm_bo_swapout_all(&dev_priv->bdev);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

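/**
 * vmw_pm_restore - Hibernation thaw / restore callback.
 *
 * @kdev: The device being restored.
 *
 * Re-identifies the SVGA device, re-acquires device resources,
 * re-enables SVGA when fbdev emulation is enabled, brings the fence
 * fifo, KMS state and fbdev back up, and drops the reservation sem
 * taken at PM_HIBERNATION_PREPARE.
 */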
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

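/*
 * drm_driver description: a modesetting-, render-node- and prime-capable
 * atomic driver. All ioctls funnel through the vmw_unlocked_ioctl() /
 * vmw_compat_ioctl() wrappers defined above.
 */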
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");