/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
#include "device_include/svga3d_caps.h"

/*
 * Legacy-format 3D capability record: a single SVGA3D caps record header
 * followed by one (index, value) pair per device capability. This is the
 * layout reported to user-space that is not guest-backed-object aware.
 */
struct svga_3d_compat_cap {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};

int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->prim_bb_mem;
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		u32 *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
			param->value = SVGA3D_HWVERSION_WS8_B1;
			break;
		}

		param->value =
			vmw_mmio_read(fifo_mem +
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				       SVGA_FIFO_3D_HWVERSION_REVISED :
				       SVGA_FIFO_3D_HWVERSION));
		break;
	}
	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    !vmw_fp->gb_aware)
			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
		else
			param->value = dev_priv->memory_size;
		break;
	case DRM_VMW_PARAM_3D_CAPS_SIZE:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    vmw_fp->gb_aware)
			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			param->value = sizeof(struct svga_3d_compat_cap) +
				sizeof(uint32_t);
		else
			param->value = (SVGA_FIFO_3D_CAPS_LAST -
					SVGA_FIFO_3D_CAPS + 1) *
				sizeof(uint32_t);
		break;
	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
		vmw_fp->gb_aware = true;
		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
		break;
	case DRM_VMW_PARAM_MAX_MOB_SIZE:
		param->value = dev_priv->max_mob_size;
		break;
	case DRM_VMW_PARAM_SCREEN_TARGET:
		param->value =
			(dev_priv->active_display_unit == vmw_du_screen_target);
		break;
	case DRM_VMW_PARAM_DX:
		param->value = dev_priv->has_dx;
		break;
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
		return -EINVAL;
	}

	return 0;
}
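
/*
 * Illustrative user-space sketch (not part of this file): the ioctl above is
 * normally reached through libdrm's command interface. The exact macro and
 * field names below are taken from vmwgfx_drm.h / libdrm usage and are an
 * assumption here, shown only to clarify how the parameter interface is
 * consumed:
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) == 0)
 *		printf("3D support: %llu\n", (unsigned long long) arg.value);
 */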

static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
{
	/* If the header is updated, update the format test as well! */
	BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);

	if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
	    cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
		fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
			       SVGADX_DXFMT_MULTISAMPLE_4 |
			       SVGADX_DXFMT_MULTISAMPLE_8);
	else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
		return 0;

	return fmt_value;
}

static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
			       size_t size)
{
	struct svga_3d_compat_cap *compat_cap =
		(struct svga_3d_compat_cap *) bounce;
	unsigned int i;
	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
	unsigned int max_size;

	if (size < pair_offset)
		return -EINVAL;

	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);

	if (max_size > SVGA3D_DEVCAP_MAX)
		max_size = SVGA3D_DEVCAP_MAX;

	compat_cap->header.length =
		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;

	spin_lock(&dev_priv->cap_lock);
	for (i = 0; i < max_size; ++i) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
		compat_cap->pairs[i][0] = i;
		compat_cap->pairs[i][1] = vmw_mask_multisample
			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
	}
	spin_unlock(&dev_priv->cap_lock);

	return 0;
}
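
/*
 * Resulting bounce-buffer layout produced above, sketched for clarity
 * (contents follow directly from the code; sizes are in 32-bit words):
 *
 *	+---------------------------+
 *	| header.length             |  words covering header + pairs
 *	| header.type               |  SVGA3DCAPS_RECORD_DEVCAPS
 *	+---------------------------+
 *	| pairs[0] = { 0, value }   |  value read via SVGA_REG_DEV_CAP,
 *	| pairs[1] = { 1, value }   |  multisample bits masked off
 *	| ...                       |
 *	+---------------------------+
 */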


int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	u32 *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0)) {
		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	if (gb_objects && vmw_fp->gb_aware)
		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
	else if (gb_objects)
		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
	else
		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
			sizeof(uint32_t);

	if (arg->max_size < size)
		size = arg->max_size;

	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	if (gb_objects && vmw_fp->gb_aware) {
		int i, num;
		uint32_t *bounce32 = (uint32_t *) bounce;

		num = size / sizeof(uint32_t);
		if (num > SVGA3D_DEVCAP_MAX)
			num = SVGA3D_DEVCAP_MAX;

		spin_lock(&dev_priv->cap_lock);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_mask_multisample
				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
		}
		spin_unlock(&dev_priv->cap_lock);
	} else if (gb_objects) {
		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
		if (unlikely(ret != 0))
			goto out_err;
	} else {
		fifo_mem = dev_priv->mmio_virt;
		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}
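
/*
 * Illustrative user-space sketch (an assumption, not part of this file): a
 * caller typically first queries DRM_VMW_PARAM_3D_CAPS_SIZE for the required
 * buffer size and then hands a buffer of at least that size to this ioctl,
 * roughly:
 *
 *	struct drm_vmw_get_3d_cap_arg arg = { 0 };
 *	void *caps = calloc(1, caps_size);	// caps_size from 3D_CAPS_SIZE
 *
 *	arg.buffer = (uint64_t)(unsigned long) caps;
 *	arg.max_size = caps_size;
 *	// submitted through libdrm's vmwgfx command interface
 *	// (DRM_VMW_GET_3D_CAP)
 */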

int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
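
/*
 * Illustrative user-space sketch (an assumption, not part of this file): a
 * present request names an existing framebuffer and surface and supplies at
 * least one clip rectangle, roughly:
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = width, .h = height };
 *	struct drm_vmw_present_arg arg = { 0 };
 *
 *	arg.fb_id = fb_id;		// DRM framebuffer handle
 *	arg.sid = surface_handle;	// vmwgfx surface handle
 *	arg.num_clips = 1;
 *	arg.clips_ptr = (uint64_t)(unsigned long) &clip;
 *	// submitted through libdrm's vmwgfx command interface (DRM_VMW_PRESENT)
 */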

int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->dmabuf) {
		DRM_ERROR("Framebuffer not dmabuf backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}


/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_poll(filp, wait);
}


/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
		      size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_read(filp, buffer, count, offset);
}
448}