/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"

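/*
 * struct svga_3d_compat_cap - 3D capabilities in the legacy FIFO record
 * format: an SVGA3dCapsRecordHeader followed by one (index, value)
 * SVGA3dCapPair per device capability.
 *
 * Filled by vmw_fill_compat_cap() for clients that query 3D caps but are
 * not guest-backed-object aware.
 */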
struct svga_3d_compat_cap {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};

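/**
 * vmw_getparam_ioctl - implementation of the DRM_VMW_GET_PARAM ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_getparam_arg.
 * @file_priv: Identifies the calling file.
 *
 * Returns the single device or driver parameter selected by param->param
 * in param->value. Querying DRM_VMW_PARAM_MAX_MOB_MEMORY also marks the
 * calling client as guest-backed-object aware. Unknown parameters yield
 * -EINVAL.
 */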
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->prim_bb_mem;
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
			param->value = SVGA3D_HWVERSION_WS8_B1;
			break;
		}

		param->value =
			ioread32(fifo_mem +
				 ((fifo->capabilities &
				   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				  SVGA_FIFO_3D_HWVERSION_REVISED :
				  SVGA_FIFO_3D_HWVERSION));
		break;
	}
	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    !vmw_fp->gb_aware)
			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
		else
			param->value = dev_priv->memory_size;
		break;
	case DRM_VMW_PARAM_3D_CAPS_SIZE:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    vmw_fp->gb_aware)
			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			param->value = sizeof(struct svga_3d_compat_cap) +
				sizeof(uint32_t);
		else
			param->value = (SVGA_FIFO_3D_CAPS_LAST -
					SVGA_FIFO_3D_CAPS + 1) *
				sizeof(uint32_t);
		break;
	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
		vmw_fp->gb_aware = true;
		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
		break;
	case DRM_VMW_PARAM_MAX_MOB_SIZE:
		param->value = dev_priv->max_mob_size;
		break;
	case DRM_VMW_PARAM_SCREEN_TARGET:
		param->value =
			(dev_priv->active_display_unit == vmw_du_screen_target);
		break;
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
		return -EINVAL;
	}

	return 0;
}

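/**
 * vmw_fill_compat_cap - Build a legacy-format 3D capability record
 *
 * @dev_priv: Pointer to the device private structure.
 * @bounce: Bounce buffer to fill, laid out as a struct svga_3d_compat_cap.
 * @size: Size of @bounce in bytes.
 *
 * Reads device capabilities through the SVGA_REG_DEV_CAP register, under
 * the cap_lock spinlock, and packs as many (index, value) pairs as fit
 * into @bounce after a SVGA3DCAPS_RECORD_DEVCAPS record header.
 */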
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
			       size_t size)
{
	struct svga_3d_compat_cap *compat_cap =
		(struct svga_3d_compat_cap *) bounce;
	unsigned int i;
	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
	unsigned int max_size;

	if (size < pair_offset)
		return -EINVAL;

	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);

	if (max_size > SVGA3D_DEVCAP_MAX)
		max_size = SVGA3D_DEVCAP_MAX;

	compat_cap->header.length =
		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;

	spin_lock(&dev_priv->cap_lock);
	for (i = 0; i < max_size; ++i) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
		compat_cap->pairs[i][0] = i;
		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
	}
	spin_unlock(&dev_priv->cap_lock);

	return 0;
}


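/**
 * vmw_get_cap_3d_ioctl - implementation of the DRM_VMW_GET_3D_CAP ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_get_3d_cap_arg.
 * @file_priv: Identifies the calling file.
 *
 * Copies the device's 3D capabilities to the user-space buffer arg->buffer,
 * truncated to arg->max_size bytes. The format depends on the device and
 * the client: guest-backed-aware clients on guest-backed hardware get a raw
 * array of SVGA_REG_DEV_CAP values, legacy clients on such hardware get a
 * compat record built by vmw_fill_compat_cap(), and otherwise the FIFO 3D
 * caps area is copied directly.
 */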
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	u32 __iomem *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0)) {
		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	if (gb_objects && vmw_fp->gb_aware)
		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
	else if (gb_objects)
		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
	else
		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
			sizeof(uint32_t);

	if (arg->max_size < size)
		size = arg->max_size;

	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	if (gb_objects && vmw_fp->gb_aware) {
		int i, num;
		uint32_t *bounce32 = (uint32_t *) bounce;

		num = size / sizeof(uint32_t);
		if (num > SVGA3D_DEVCAP_MAX)
			num = SVGA3D_DEVCAP_MAX;

		spin_lock(&dev_priv->cap_lock);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		}
		spin_unlock(&dev_priv->cap_lock);
	} else if (gb_objects) {
		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
		if (unlikely(ret != 0))
			goto out_err;
	} else {
		fifo_mem = dev_priv->mmio_virt;
		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}

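/**
 * vmw_present_ioctl - implementation of the DRM_VMW_PRESENT ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_present_arg.
 * @file_priv: Identifies the calling file.
 *
 * Copies the contents of the surface identified by arg->sid to the
 * framebuffer identified by arg->fb_id, restricted to the user-supplied
 * clip rectangles, by calling vmw_kms_present().
 */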
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_resource_lookup_handle takes one ref, so does drm_framebuffer_lookup */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}

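/**
 * vmw_present_readback_ioctl - implementation of the
 * DRM_VMW_PRESENT_READBACK ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_present_readback_arg.
 * @file_priv: Identifies the calling file.
 *
 * Reads the contents of the dmabuf-backed framebuffer identified by
 * arg->fb_id back from the device for the user-supplied clip rectangles,
 * by calling vmw_kms_readback() with the user-space fence representation
 * pointer arg->fence_rep.
 */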
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->dmabuf) {
		DRM_ERROR("Framebuffer not dmabuf backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}


/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_poll(filp, wait);
}


/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
		      size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_read(filp, buffer, count, offset);
}