Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001/**************************************************************************
2 *
Sinclair Yehc8261a92015-06-26 01:23:42 -07003 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00004 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_kms.h"
29
Jakob Bornecrantz56d1c782011-10-04 20:13:22 +020030
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +000031/* Might need an hrtimer here? */
32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
33
Sinclair Yehc8261a92015-06-26 01:23:42 -070034void vmw_du_cleanup(struct vmw_display_unit *du)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +000035{
36 if (du->cursor_surface)
37 vmw_surface_unreference(&du->cursor_surface);
38 if (du->cursor_dmabuf)
39 vmw_dmabuf_unreference(&du->cursor_dmabuf);
Thomas Wood34ea3d32014-05-29 16:57:41 +010040 drm_connector_unregister(&du->connector);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +000041 drm_crtc_cleanup(&du->crtc);
42 drm_encoder_cleanup(&du->encoder);
43 drm_connector_cleanup(&du->connector);
44}
45
46/*
47 * Display Unit Cursor functions
48 */
49
50int vmw_cursor_update_image(struct vmw_private *dev_priv,
51 u32 *image, u32 width, u32 height,
52 u32 hotspotX, u32 hotspotY)
53{
54 struct {
55 u32 cmd;
56 SVGAFifoCmdDefineAlphaCursor cursor;
57 } *cmd;
58 u32 image_size = width * height * 4;
59 u32 cmd_size = sizeof(*cmd) + image_size;
60
61 if (!image)
62 return -EINVAL;
63
64 cmd = vmw_fifo_reserve(dev_priv, cmd_size);
65 if (unlikely(cmd == NULL)) {
66 DRM_ERROR("Fifo reserve failed.\n");
67 return -ENOMEM;
68 }
69
70 memset(cmd, 0, sizeof(*cmd));
71
72 memcpy(&cmd[1], image, image_size);
73
74 cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
75 cmd->cursor.id = cpu_to_le32(0);
76 cmd->cursor.width = cpu_to_le32(width);
77 cmd->cursor.height = cpu_to_le32(height);
78 cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
79 cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
80
81 vmw_fifo_commit(dev_priv, cmd_size);
82
83 return 0;
84}
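/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * holding a 64x64 premultiplied ARGB cursor image in a hypothetical "argb"
 * buffer could define it as the device cursor with the hotspot in the
 * top-left corner like this:
 *
 *	u32 argb[64 * 64];
 *	int ret;
 *
 *	ret = vmw_cursor_update_image(dev_priv, argb, 64, 64, 0, 0);
 *	if (ret)
 *		DRM_ERROR("cursor image update failed: %d\n", ret);
 *
 * The image data is memcpy'd into the reserved FIFO space right behind the
 * SVGA_CMD_DEFINE_ALPHA_CURSOR command, so the caller's buffer is not
 * referenced after the function returns.
 */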
85
Jakob Bornecrantz6a91d972011-11-28 13:19:10 +010086int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
87 struct vmw_dma_buffer *dmabuf,
88 u32 width, u32 height,
89 u32 hotspotX, u32 hotspotY)
90{
91 struct ttm_bo_kmap_obj map;
92 unsigned long kmap_offset;
93 unsigned long kmap_num;
94 void *virtual;
95 bool dummy;
96 int ret;
97
98 kmap_offset = 0;
99 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
100
Thierry Redingee3939e2014-07-21 13:15:51 +0200101 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
Jakob Bornecrantz6a91d972011-11-28 13:19:10 +0100102 if (unlikely(ret != 0)) {
103 DRM_ERROR("reserve failed\n");
104 return -EINVAL;
105 }
106
107 ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
108 if (unlikely(ret != 0))
109 goto err_unreserve;
110
111 virtual = ttm_kmap_obj_virtual(&map, &dummy);
112 ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
113 hotspotX, hotspotY);
114
115 ttm_bo_kunmap(&map);
116err_unreserve:
117 ttm_bo_unreserve(&dmabuf->base);
118
119 return ret;
120}
121
122
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000123void vmw_cursor_update_position(struct vmw_private *dev_priv,
124 bool show, int x, int y)
125{
126 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
127 uint32_t count;
128
129 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
130 iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
131 iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
132 count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
133 iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
134}
135
136int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
137 uint32_t handle, uint32_t width, uint32_t height)
138{
139 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000140 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
141 struct vmw_surface *surface = NULL;
142 struct vmw_dma_buffer *dmabuf = NULL;
143 int ret;
144
Daniel Vetterbfb89922012-12-02 13:48:21 +0100145 /*
146 * FIXME: Unclear whether there's any global state touched by the
147 * cursor_set function, especially vmw_cursor_update_position looks
148 * suspicious. For now take the easy route and reacquire all locks. We
149 * can do this since the caller in the drm core doesn't check anything
 150 * which is protected by any locks.
151 */
Rob Clark21e88622014-10-30 13:39:04 -0400152 drm_modeset_unlock_crtc(crtc);
Daniel Vetterbfb89922012-12-02 13:48:21 +0100153 drm_modeset_lock_all(dev_priv->dev);
154
Jakob Bornecrantzbaa91d642011-11-09 10:25:28 +0100155 /* A lot of the code assumes this */
Daniel Vetterbfb89922012-12-02 13:48:21 +0100156 if (handle && (width != 64 || height != 64)) {
157 ret = -EINVAL;
158 goto out;
159 }
Jakob Bornecrantzbaa91d642011-11-09 10:25:28 +0100160
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000161 if (handle) {
Ville Syrjäläa5d0f572013-06-03 16:10:41 +0300162 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
163
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100164 ret = vmw_user_lookup_handle(dev_priv, tfile,
165 handle, &surface, &dmabuf);
166 if (ret) {
167 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
Daniel Vetterbfb89922012-12-02 13:48:21 +0100168 ret = -EINVAL;
169 goto out;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000170 }
171 }
172
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100173 /* need to do this before taking down old image */
174 if (surface && !surface->snooper.image) {
175 DRM_ERROR("surface not suitable for cursor\n");
176 vmw_surface_unreference(&surface);
Daniel Vetterbfb89922012-12-02 13:48:21 +0100177 ret = -EINVAL;
178 goto out;
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100179 }
180
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000181 /* take down the old cursor */
182 if (du->cursor_surface) {
183 du->cursor_surface->snooper.crtc = NULL;
184 vmw_surface_unreference(&du->cursor_surface);
185 }
186 if (du->cursor_dmabuf)
187 vmw_dmabuf_unreference(&du->cursor_dmabuf);
188
189 /* setup new image */
190 if (surface) {
 191 /* vmw_user_lookup_handle takes one reference */
192 du->cursor_surface = surface;
193
194 du->cursor_surface->snooper.crtc = crtc;
195 du->cursor_age = du->cursor_surface->snooper.age;
196 vmw_cursor_update_image(dev_priv, surface->snooper.image,
197 64, 64, du->hotspot_x, du->hotspot_y);
198 } else if (dmabuf) {
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000199 /* vmw_user_lookup_handle takes one reference */
200 du->cursor_dmabuf = dmabuf;
201
Jakob Bornecrantz6a91d972011-11-28 13:19:10 +0100202 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
203 du->hotspot_x, du->hotspot_y);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000204 } else {
205 vmw_cursor_update_position(dev_priv, false, 0, 0);
Daniel Vetterbfb89922012-12-02 13:48:21 +0100206 ret = 0;
207 goto out;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000208 }
209
Thomas Hellstromda7653d2011-11-02 09:43:12 +0100210 vmw_cursor_update_position(dev_priv, true,
211 du->cursor_x + du->hotspot_x,
212 du->cursor_y + du->hotspot_y);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000213
Daniel Vetterbfb89922012-12-02 13:48:21 +0100214 ret = 0;
215out:
216 drm_modeset_unlock_all(dev_priv->dev);
Daniel Vetter4d02e2d2014-11-11 10:12:00 +0100217 drm_modeset_lock_crtc(crtc, crtc->cursor);
Daniel Vetterbfb89922012-12-02 13:48:21 +0100218
219 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000220}
221
222int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
223{
224 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
225 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
226 bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
227
228 du->cursor_x = x + crtc->x;
229 du->cursor_y = y + crtc->y;
230
Daniel Vetterdac35662012-12-02 15:24:10 +0100231 /*
232 * FIXME: Unclear whether there's any global state touched by the
233 * cursor_set function, especially vmw_cursor_update_position looks
234 * suspicious. For now take the easy route and reacquire all locks. We
235 * can do this since the caller in the drm core doesn't check anything
 236 * which is protected by any locks.
237 */
Rob Clark21e88622014-10-30 13:39:04 -0400238 drm_modeset_unlock_crtc(crtc);
Daniel Vetterdac35662012-12-02 15:24:10 +0100239 drm_modeset_lock_all(dev_priv->dev);
240
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000241 vmw_cursor_update_position(dev_priv, shown,
Thomas Hellstromda7653d2011-11-02 09:43:12 +0100242 du->cursor_x + du->hotspot_x,
243 du->cursor_y + du->hotspot_y);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000244
Daniel Vetterdac35662012-12-02 15:24:10 +0100245 drm_modeset_unlock_all(dev_priv->dev);
Daniel Vetter4d02e2d2014-11-11 10:12:00 +0100246 drm_modeset_lock_crtc(crtc, crtc->cursor);
Daniel Vetterdac35662012-12-02 15:24:10 +0100247
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000248 return 0;
249}
250
251void vmw_kms_cursor_snoop(struct vmw_surface *srf,
252 struct ttm_object_file *tfile,
253 struct ttm_buffer_object *bo,
254 SVGA3dCmdHeader *header)
255{
256 struct ttm_bo_kmap_obj map;
257 unsigned long kmap_offset;
258 unsigned long kmap_num;
259 SVGA3dCopyBox *box;
260 unsigned box_count;
261 void *virtual;
262 bool dummy;
263 struct vmw_dma_cmd {
264 SVGA3dCmdHeader header;
265 SVGA3dCmdSurfaceDMA dma;
266 } *cmd;
Jakob Bornecrantz2ac86372011-11-03 21:03:08 +0100267 int i, ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000268
269 cmd = container_of(header, struct vmw_dma_cmd, header);
270
271 /* No snooper installed */
272 if (!srf->snooper.image)
273 return;
274
275 if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
276 DRM_ERROR("face and mipmap for cursors should never != 0\n");
277 return;
278 }
279
280 if (cmd->header.size < 64) {
281 DRM_ERROR("at least one full copy box must be given\n");
282 return;
283 }
284
285 box = (SVGA3dCopyBox *)&cmd[1];
286 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
287 sizeof(SVGA3dCopyBox);
288
Jakob Bornecrantz2ac86372011-11-03 21:03:08 +0100289 if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000290 box->x != 0 || box->y != 0 || box->z != 0 ||
291 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
Jakob Bornecrantz2ac86372011-11-03 21:03:08 +0100292 box->d != 1 || box_count != 1) {
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000293 /* TODO handle non-page-aligned offsets */
Jakob Bornecrantz2ac86372011-11-03 21:03:08 +0100294 /* TODO handle more dst & src != 0 */
 295 /* TODO handle more than one copy */
 296 DRM_ERROR("Can't snoop dma request for cursor!\n");
297 DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
298 box->srcx, box->srcy, box->srcz,
299 box->x, box->y, box->z,
300 box->w, box->h, box->d, box_count,
301 cmd->dma.guest.ptr.offset);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000302 return;
303 }
304
305 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
306 kmap_num = (64*64*4) >> PAGE_SHIFT;
307
Thierry Redingee3939e2014-07-21 13:15:51 +0200308 ret = ttm_bo_reserve(bo, true, false, false, NULL);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000309 if (unlikely(ret != 0)) {
310 DRM_ERROR("reserve failed\n");
311 return;
312 }
313
314 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
315 if (unlikely(ret != 0))
316 goto err_unreserve;
317
318 virtual = ttm_kmap_obj_virtual(&map, &dummy);
319
Jakob Bornecrantz2ac86372011-11-03 21:03:08 +0100320 if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
321 memcpy(srf->snooper.image, virtual, 64*64*4);
322 } else {
323 /* Image is unsigned pointer. */
324 for (i = 0; i < box->h; i++)
325 memcpy(srf->snooper.image + i * 64,
326 virtual + i * cmd->dma.guest.pitch,
327 box->w * 4);
328 }
329
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000330 srf->snooper.age++;
331
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000332 ttm_bo_kunmap(&map);
333err_unreserve:
334 ttm_bo_unreserve(bo);
335}
336
337void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
338{
339 struct drm_device *dev = dev_priv->dev;
340 struct vmw_display_unit *du;
341 struct drm_crtc *crtc;
342
343 mutex_lock(&dev->mode_config.mutex);
344
345 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
346 du = vmw_crtc_to_du(crtc);
347 if (!du->cursor_surface ||
348 du->cursor_age == du->cursor_surface->snooper.age)
349 continue;
350
351 du->cursor_age = du->cursor_surface->snooper.age;
352 vmw_cursor_update_image(dev_priv,
353 du->cursor_surface->snooper.image,
354 64, 64, du->hotspot_x, du->hotspot_y);
355 }
356
357 mutex_unlock(&dev->mode_config.mutex);
358}
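/*
 * Note on the snooping scheme above: vmw_kms_cursor_snoop() intercepts
 * SVGA3dCmdSurfaceDMA transfers targeting a cursor surface, copies the
 * transferred image into srf->snooper.image and bumps srf->snooper.age.
 * vmw_kms_cursor_post_execbuf() then walks all CRTCs after command
 * submission and re-uploads the snooped image for any display unit whose
 * cursor_age no longer matches the surface's snooper age, keeping the host
 * cursor in sync with guest-side cursor updates.
 */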
359
360/*
361 * Generic framebuffer code
362 */
363
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000364/*
365 * Surface framebuffer code
366 */
367
Rashika Kheria847c5962014-01-06 22:18:10 +0530368static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000369{
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200370 struct vmw_framebuffer_surface *vfbs =
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000371 vmw_framebuffer_to_vfbs(framebuffer);
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200372 struct vmw_master *vmaster = vmw_master(vfbs->master);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000373
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200374
375 mutex_lock(&vmaster->fb_surf_mutex);
376 list_del(&vfbs->head);
377 mutex_unlock(&vmaster->fb_surf_mutex);
378
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200379 drm_master_put(&vfbs->master);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000380 drm_framebuffer_cleanup(framebuffer);
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200381 vmw_surface_unreference(&vfbs->surface);
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200382 ttm_base_object_unref(&vfbs->base.user_obj);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000383
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200384 kfree(vfbs);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000385}
386
Rashika Kheria847c5962014-01-06 22:18:10 +0530387static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
Thomas Hellstrom02b00162010-10-05 12:43:02 +0200388 struct drm_file *file_priv,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000389 unsigned flags, unsigned color,
390 struct drm_clip_rect *clips,
391 unsigned num_clips)
392{
393 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
394 struct vmw_framebuffer_surface *vfbs =
395 vmw_framebuffer_to_vfbs(framebuffer);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000396 struct drm_clip_rect norect;
Jakob Bornecrantz5deb65c2011-10-04 20:13:18 +0200397 int ret, inc = 1;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000398
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200399 if (unlikely(vfbs->master != file_priv->master))
400 return -EINVAL;
401
Sinclair Yehc8261a92015-06-26 01:23:42 -0700402 /* Legacy Display Unit does not support 3D */
403 if (dev_priv->active_display_unit == vmw_du_legacy)
Jakob Bornecrantz01e81412011-10-04 20:13:24 +0200404 return -EINVAL;
405
Ville Syrjälä73e9efd2013-12-04 14:13:58 +0200406 drm_modeset_lock_all(dev_priv->dev);
407
Thomas Hellstrom294adf72014-02-27 12:34:51 +0100408 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
Ville Syrjälä73e9efd2013-12-04 14:13:58 +0200409 if (unlikely(ret != 0)) {
410 drm_modeset_unlock_all(dev_priv->dev);
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200411 return ret;
Ville Syrjälä73e9efd2013-12-04 14:13:58 +0200412 }
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200413
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000414 if (!num_clips) {
415 num_clips = 1;
416 clips = &norect;
417 norect.x1 = norect.y1 = 0;
418 norect.x2 = framebuffer->width;
419 norect.y2 = framebuffer->height;
420 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
421 num_clips /= 2;
422 inc = 2; /* skip source rects */
423 }
424
Sinclair Yehc8261a92015-06-26 01:23:42 -0700425 if (dev_priv->active_display_unit == vmw_du_screen_object)
Thomas Hellstrom10b1e0c2015-06-26 02:14:27 -0700426 ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
427 clips, NULL, NULL, 0, 0,
428 num_clips, inc, NULL);
Sinclair Yeh35c05122015-06-26 01:42:06 -0700429 else
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700430 ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
431 clips, NULL, NULL, 0, 0,
432 num_clips, inc, NULL);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000433
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700434 vmw_fifo_flush(dev_priv, false);
Thomas Hellstrom294adf72014-02-27 12:34:51 +0100435 ttm_read_unlock(&dev_priv->reservation_sem);
Ville Syrjälä73e9efd2013-12-04 14:13:58 +0200436
437 drm_modeset_unlock_all(dev_priv->dev);
438
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000439 return 0;
440}
441
Thomas Hellstrom10b1e0c2015-06-26 02:14:27 -0700442/**
443 * vmw_kms_readback - Perform a readback from the screen system to
444 * a dma-buffer backed framebuffer.
445 *
446 * @dev_priv: Pointer to the device private structure.
447 * @file_priv: Pointer to a struct drm_file identifying the caller.
448 * Must be set to NULL if @user_fence_rep is NULL.
449 * @vfb: Pointer to the dma-buffer backed framebuffer.
450 * @user_fence_rep: User-space provided structure for fence information.
451 * Must be set to non-NULL if @file_priv is non-NULL.
452 * @vclips: Array of clip rects.
453 * @num_clips: Number of clip rects in @vclips.
454 *
455 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
456 * interrupted.
457 */
458int vmw_kms_readback(struct vmw_private *dev_priv,
459 struct drm_file *file_priv,
460 struct vmw_framebuffer *vfb,
461 struct drm_vmw_fence_rep __user *user_fence_rep,
462 struct drm_vmw_rect *vclips,
463 uint32_t num_clips)
464{
465 switch (dev_priv->active_display_unit) {
466 case vmw_du_screen_object:
467 return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
468 user_fence_rep, vclips, num_clips);
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700469 case vmw_du_screen_target:
470 return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
471 user_fence_rep, NULL, vclips, num_clips,
472 1, false, true);
Thomas Hellstrom10b1e0c2015-06-26 02:14:27 -0700473 default:
474 WARN_ONCE(true,
475 "Readback called with invalid display system.\n");
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700476	}
Thomas Hellstrom10b1e0c2015-06-26 02:14:27 -0700477
478 return -ENOSYS;
479}
480
481
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000482static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
483 .destroy = vmw_framebuffer_surface_destroy,
484 .dirty = vmw_framebuffer_surface_dirty,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000485};
486
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200487static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200488 struct drm_file *file_priv,
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200489 struct vmw_surface *surface,
490 struct vmw_framebuffer **out,
491 const struct drm_mode_fb_cmd
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700492 *mode_cmd,
493 bool is_dmabuf_proxy)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000494
495{
496 struct drm_device *dev = dev_priv->dev;
497 struct vmw_framebuffer_surface *vfbs;
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200498 enum SVGA3dSurfaceFormat format;
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200499 struct vmw_master *vmaster = vmw_master(file_priv->master);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000500 int ret;
501
Sinclair Yehc8261a92015-06-26 01:23:42 -0700502 /* 3D is only supported on HWv8 and newer hosts */
503 if (dev_priv->active_display_unit == vmw_du_legacy)
Jakob Bornecrantz01e81412011-10-04 20:13:24 +0200504 return -ENOSYS;
505
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200506 /*
507 * Sanity checks.
508 */
509
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100510 /* Surface must be marked as a scanout. */
511 if (unlikely(!surface->scanout))
512 return -EINVAL;
513
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200514 if (unlikely(surface->mip_levels[0] != 1 ||
515 surface->num_sizes != 1 ||
Thomas Hellstromb360a3c2014-01-15 08:51:36 +0100516 surface->base_size.width < mode_cmd->width ||
517 surface->base_size.height < mode_cmd->height ||
518 surface->base_size.depth != 1)) {
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200519 DRM_ERROR("Incompatible surface dimensions "
520 "for requested mode.\n");
521 return -EINVAL;
522 }
523
524 switch (mode_cmd->depth) {
525 case 32:
526 format = SVGA3D_A8R8G8B8;
527 break;
528 case 24:
529 format = SVGA3D_X8R8G8B8;
530 break;
531 case 16:
532 format = SVGA3D_R5G6B5;
533 break;
534 case 15:
535 format = SVGA3D_A1R5G5B5;
536 break;
537 default:
538 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
539 return -EINVAL;
540 }
541
542 if (unlikely(format != surface->format)) {
543 DRM_ERROR("Invalid surface format for requested mode.\n");
544 return -EINVAL;
545 }
546
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000547 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
548 if (!vfbs) {
549 ret = -ENOMEM;
550 goto out_err1;
551 }
552
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000553 if (!vmw_surface_reference(surface)) {
554 DRM_ERROR("failed to reference surface %p\n", surface);
Daniel Vetter80f0b5a2012-12-13 23:39:01 +0100555 ret = -EINVAL;
556 goto out_err2;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000557 }
558
559 /* XXX get the first 3 from the surface info */
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200560 vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
Ville Syrjälä01f2c772011-12-20 00:06:49 +0200561 vfbs->base.base.pitches[0] = mode_cmd->pitch;
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200562 vfbs->base.base.depth = mode_cmd->depth;
563 vfbs->base.base.width = mode_cmd->width;
564 vfbs->base.base.height = mode_cmd->height;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000565 vfbs->surface = surface;
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200566 vfbs->base.user_handle = mode_cmd->handle;
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200567 vfbs->master = drm_master_get(file_priv->master);
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700568 vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200569
570 mutex_lock(&vmaster->fb_surf_mutex);
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200571 list_add_tail(&vfbs->head, &vmaster->fb_surf);
572 mutex_unlock(&vmaster->fb_surf_mutex);
573
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000574 *out = &vfbs->base;
575
Daniel Vetter80f0b5a2012-12-13 23:39:01 +0100576 ret = drm_framebuffer_init(dev, &vfbs->base.base,
577 &vmw_framebuffer_surface_funcs);
578 if (ret)
579 goto out_err3;
580
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000581 return 0;
582
583out_err3:
Daniel Vetter80f0b5a2012-12-13 23:39:01 +0100584 vmw_surface_unreference(&surface);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000585out_err2:
586 kfree(vfbs);
587out_err1:
588 return ret;
589}
590
591/*
592 * Dmabuf framebuffer code
593 */
594
Rashika Kheria847c5962014-01-06 22:18:10 +0530595static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000596{
597 struct vmw_framebuffer_dmabuf *vfbd =
598 vmw_framebuffer_to_vfbd(framebuffer);
599
600 drm_framebuffer_cleanup(framebuffer);
601 vmw_dmabuf_unreference(&vfbd->buffer);
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200602 ttm_base_object_unref(&vfbd->base.user_obj);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000603
604 kfree(vfbd);
605}
606
Rashika Kheria847c5962014-01-06 22:18:10 +0530607static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
Thomas Hellstrom02b00162010-10-05 12:43:02 +0200608 struct drm_file *file_priv,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000609 unsigned flags, unsigned color,
610 struct drm_clip_rect *clips,
611 unsigned num_clips)
612{
613 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
Jakob Bornecrantz5deb65c2011-10-04 20:13:18 +0200614 struct vmw_framebuffer_dmabuf *vfbd =
615 vmw_framebuffer_to_vfbd(framebuffer);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000616 struct drm_clip_rect norect;
Jakob Bornecrantz5deb65c2011-10-04 20:13:18 +0200617 int ret, increment = 1;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000618
Ville Syrjälä73e9efd2013-12-04 14:13:58 +0200619 drm_modeset_lock_all(dev_priv->dev);
620
Thomas Hellstrom294adf72014-02-27 12:34:51 +0100621 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
Ville Syrjälä73e9efd2013-12-04 14:13:58 +0200622 if (unlikely(ret != 0)) {
623 drm_modeset_unlock_all(dev_priv->dev);
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200624 return ret;
Ville Syrjälä73e9efd2013-12-04 14:13:58 +0200625 }
Thomas Hellstrom3a939a52010-10-05 12:43:03 +0200626
Thomas Hellstromdf1c93b2010-01-13 22:28:36 +0100627 if (!num_clips) {
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000628 num_clips = 1;
629 clips = &norect;
630 norect.x1 = norect.y1 = 0;
631 norect.x2 = framebuffer->width;
632 norect.y2 = framebuffer->height;
633 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
634 num_clips /= 2;
635 increment = 2;
636 }
637
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700638 switch (dev_priv->active_display_unit) {
639 case vmw_du_screen_target:
640 ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
641 clips, NULL, num_clips, increment,
642 true, true);
643 break;
644 case vmw_du_screen_object:
Thomas Hellstrom10b1e0c2015-06-26 02:14:27 -0700645 ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
Sinclair Yehc8261a92015-06-26 01:23:42 -0700646 clips, num_clips, increment,
Thomas Hellstrom10b1e0c2015-06-26 02:14:27 -0700647 true,
Sinclair Yehc8261a92015-06-26 01:23:42 -0700648 NULL);
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700649 break;
650 default:
651 ret = -ENOSYS;
652 WARN_ONCE(true,
653 "Dirty called with invalid display system.\n");
654 break;
Jakob Bornecrantz56d1c782011-10-04 20:13:22 +0200655 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000656
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -0700657 vmw_fifo_flush(dev_priv, false);
Thomas Hellstrom294adf72014-02-27 12:34:51 +0100658 ttm_read_unlock(&dev_priv->reservation_sem);
Ville Syrjälä73e9efd2013-12-04 14:13:58 +0200659
660 drm_modeset_unlock_all(dev_priv->dev);
661
Jakob Bornecrantz5deb65c2011-10-04 20:13:18 +0200662 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000663}
664
665static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
666 .destroy = vmw_framebuffer_dmabuf_destroy,
667 .dirty = vmw_framebuffer_dmabuf_dirty,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000668};
669
Jakob Bornecrantz497a3ff2011-10-04 20:13:14 +0200670/**
Jakob Bornecrantz497a3ff2011-10-04 20:13:14 +0200671 * Pin the DMA buffer to the start of VRAM.
672 */
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000673static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
674{
675 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
676 struct vmw_framebuffer_dmabuf *vfbd =
677 vmw_framebuffer_to_vfbd(&vfb->base);
678 int ret;
679
Sinclair Yehc8261a92015-06-26 01:23:42 -0700680 /* This code should only be used with Legacy Display Unit */
681 BUG_ON(dev_priv->active_display_unit != vmw_du_legacy);
Jakob Bornecrantzd7e19582010-05-28 11:21:59 +0200682
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000683 vmw_overlay_pause_all(dev_priv);
684
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -0700685 ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, vfbd->buffer, false);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000686
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000687 vmw_overlay_resume_all(dev_priv);
688
Jakob Bornecrantz316ab132010-05-28 11:22:05 +0200689 WARN_ON(ret != 0);
690
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000691 return 0;
692}
693
694static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
695{
696 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
697 struct vmw_framebuffer_dmabuf *vfbd =
698 vmw_framebuffer_to_vfbd(&vfb->base);
699
700 if (!vfbd->buffer) {
701 WARN_ON(!vfbd->buffer);
702 return 0;
703 }
704
Jakob Bornecrantzd991ef02011-10-04 20:13:21 +0200705 return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000706}
707
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700708/**
709 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
710 *
711 * @dev: DRM device
712 * @mode_cmd: parameters for the new surface
713 * @dmabuf_mob: MOB backing the DMA buf
714 * @srf_out: newly created surface
715 *
716 * When the content FB is a DMA buf, we create a surface as a proxy to the
717 * same buffer. This way we can do a surface copy rather than a surface DMA.
 718 * This is a more efficient approach.
719 *
720 * RETURNS:
721 * 0 on success, error code otherwise
722 */
723static int vmw_create_dmabuf_proxy(struct drm_device *dev,
724 struct drm_mode_fb_cmd *mode_cmd,
725 struct vmw_dma_buffer *dmabuf_mob,
726 struct vmw_surface **srf_out)
727{
728 uint32_t format;
729 struct drm_vmw_size content_base_size;
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700730 struct vmw_resource *res;
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700731 int ret;
732
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700733 switch (mode_cmd->depth) {
734 case 32:
735 case 24:
736 format = SVGA3D_X8R8G8B8;
737 break;
738
739 case 16:
740 case 15:
741 format = SVGA3D_R5G6B5;
742 break;
743
744 case 8:
745 format = SVGA3D_P8;
746 break;
747
748 default:
749 DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
750 return -EINVAL;
751 }
752
753 content_base_size.width = mode_cmd->width;
754 content_base_size.height = mode_cmd->height;
755 content_base_size.depth = 1;
756
757 ret = vmw_surface_gb_priv_define(dev,
758 0, /* kernel visible only */
759 0, /* flags */
760 format,
761 true, /* can be a scanout buffer */
762 1, /* num of mip levels */
763 0,
764 content_base_size,
765 srf_out);
766 if (ret) {
767 DRM_ERROR("Failed to allocate proxy content buffer\n");
768 return ret;
769 }
770
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700771 res = &(*srf_out)->res;
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700772
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700773 /* Reserve and switch the backing mob. */
774 mutex_lock(&res->dev_priv->cmdbuf_mutex);
775 (void) vmw_resource_reserve(res, false, true);
776 vmw_dmabuf_unreference(&res->backup);
777 res->backup = vmw_dmabuf_reference(dmabuf_mob);
778 res->backup_offset = 0;
779 vmw_resource_unreserve(res, NULL, 0);
780 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700781
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700782 return 0;
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700783}
784
785
786
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200787static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
788 struct vmw_dma_buffer *dmabuf,
789 struct vmw_framebuffer **out,
790 const struct drm_mode_fb_cmd
791 *mode_cmd)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000792
793{
794 struct drm_device *dev = dev_priv->dev;
795 struct vmw_framebuffer_dmabuf *vfbd;
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200796 unsigned int requested_size;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000797 int ret;
798
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200799 requested_size = mode_cmd->height * mode_cmd->pitch;
800 if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
801 DRM_ERROR("Screen buffer object size is too small "
802 "for requested mode.\n");
803 return -EINVAL;
804 }
805
Jakob Bornecrantzc337ada2011-10-04 20:13:34 +0200806 /* Limited framebuffer color depth support for screen objects */
Sinclair Yehc8261a92015-06-26 01:23:42 -0700807 if (dev_priv->active_display_unit == vmw_du_screen_object) {
Jakob Bornecrantzc337ada2011-10-04 20:13:34 +0200808 switch (mode_cmd->depth) {
809 case 32:
810 case 24:
811 /* Only support 32 bpp for 32 and 24 depth fbs */
812 if (mode_cmd->bpp == 32)
813 break;
814
 815 DRM_ERROR("Invalid color depth/bpp: %d %d\n",
816 mode_cmd->depth, mode_cmd->bpp);
817 return -EINVAL;
818 case 16:
819 case 15:
820 /* Only support 16 bpp for 16 and 15 depth fbs */
821 if (mode_cmd->bpp == 16)
822 break;
823
 824 DRM_ERROR("Invalid color depth/bpp: %d %d\n",
825 mode_cmd->depth, mode_cmd->bpp);
826 return -EINVAL;
827 default:
828 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
829 return -EINVAL;
830 }
831 }
832
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000833 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
834 if (!vfbd) {
835 ret = -ENOMEM;
836 goto out_err1;
837 }
838
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000839 if (!vmw_dmabuf_reference(dmabuf)) {
840 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
Daniel Vetter80f0b5a2012-12-13 23:39:01 +0100841 ret = -EINVAL;
842 goto out_err2;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000843 }
844
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200845 vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
Ville Syrjälä01f2c772011-12-20 00:06:49 +0200846 vfbd->base.base.pitches[0] = mode_cmd->pitch;
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200847 vfbd->base.base.depth = mode_cmd->depth;
848 vfbd->base.base.width = mode_cmd->width;
849 vfbd->base.base.height = mode_cmd->height;
Sinclair Yehc8261a92015-06-26 01:23:42 -0700850 if (dev_priv->active_display_unit == vmw_du_legacy) {
Jakob Bornecrantz56d1c782011-10-04 20:13:22 +0200851 vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
852 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
853 }
Jakob Bornecrantz2fcd5a72011-10-04 20:13:26 +0200854 vfbd->base.dmabuf = true;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000855 vfbd->buffer = dmabuf;
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200856 vfbd->base.user_handle = mode_cmd->handle;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000857 *out = &vfbd->base;
858
Daniel Vetter80f0b5a2012-12-13 23:39:01 +0100859 ret = drm_framebuffer_init(dev, &vfbd->base.base,
860 &vmw_framebuffer_dmabuf_funcs);
861 if (ret)
862 goto out_err3;
863
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000864 return 0;
865
866out_err3:
Daniel Vetter80f0b5a2012-12-13 23:39:01 +0100867 vmw_dmabuf_unreference(&dmabuf);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000868out_err2:
869 kfree(vfbd);
870out_err1:
871 return ret;
872}
873
874/*
875 * Generic Kernel modesetting functions
876 */
877
878static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
879 struct drm_file *file_priv,
Jesse Barnes308e5bc2011-11-14 14:51:28 -0800880 struct drm_mode_fb_cmd2 *mode_cmd2)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000881{
882 struct vmw_private *dev_priv = vmw_priv(dev);
883 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
884 struct vmw_framebuffer *vfb = NULL;
885 struct vmw_surface *surface = NULL;
886 struct vmw_dma_buffer *bo = NULL;
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200887 struct ttm_base_object *user_obj;
Jesse Barnes308e5bc2011-11-14 14:51:28 -0800888 struct drm_mode_fb_cmd mode_cmd;
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700889 bool is_dmabuf_proxy = false;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000890 int ret;
891
Jesse Barnes308e5bc2011-11-14 14:51:28 -0800892 mode_cmd.width = mode_cmd2->width;
893 mode_cmd.height = mode_cmd2->height;
894 mode_cmd.pitch = mode_cmd2->pitches[0];
895 mode_cmd.handle = mode_cmd2->handles[0];
Dave Airlie248dbc22011-11-29 20:02:54 +0000896 drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
Jesse Barnes308e5bc2011-11-14 14:51:28 -0800897 &mode_cmd.bpp);
898
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200899 /**
900 * This code should be conditioned on Screen Objects not being used.
901 * If screen objects are used, we can allocate a GMR to hold the
902 * requested framebuffer.
903 */
904
Xi Wang8a783892011-12-21 05:18:33 -0500905 if (!vmw_kms_validate_mode_vram(dev_priv,
Linus Torvalds1a464cb2012-01-10 11:04:36 -0800906 mode_cmd.pitch,
907 mode_cmd.height)) {
Sinclair Yehc8261a92015-06-26 01:23:42 -0700908 DRM_ERROR("Requested mode exceeds the bounding box limit.\n");
Jakob Bornecrantzd9826402011-11-03 21:03:04 +0100909 return ERR_PTR(-ENOMEM);
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200910 }
911
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200912 /*
913 * Take a reference on the user object of the resource
914 * backing the kms fb. This ensures that user-space handle
915 * lookups on that resource will always work as long as
916 * it's registered with a kms framebuffer. This is important,
917 * since vmw_execbuf_process identifies resources in the
918 * command stream using user-space handles.
919 */
920
Jesse Barnes308e5bc2011-11-14 14:51:28 -0800921 user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200922 if (unlikely(user_obj == NULL)) {
923 DRM_ERROR("Could not locate requested kms frame buffer.\n");
924 return ERR_PTR(-ENOENT);
925 }
926
Thomas Hellstromd3216a02010-10-05 12:42:59 +0200927 /**
928 * End conditioned code.
929 */
930
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100931 /* returns either a dmabuf or surface */
932 ret = vmw_user_lookup_handle(dev_priv, tfile,
Dave Airlie4cf73122011-12-21 09:50:56 +0000933 mode_cmd.handle,
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100934 &surface, &bo);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000935 if (ret)
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100936 goto err_out;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000937
Sinclair Yehf89c6c32015-06-26 01:54:28 -0700938 /*
 939 * We cannot use the SurfaceDMA command in a non-accelerated VM,
940 * therefore, wrap the DMA buf in a surface so we can use the
941 * SurfaceCopy command.
942 */
943 if (bo && !(dev_priv->capabilities & SVGA_CAP_3D) &&
944 dev_priv->active_display_unit == vmw_du_screen_target) {
945 ret = vmw_create_dmabuf_proxy(dev_priv->dev, &mode_cmd, bo,
946 &surface);
947 if (ret)
948 goto err_out;
949
950 is_dmabuf_proxy = true;
951 }
952
 953 /* Create the new framebuffer depending on what we have */
954 if (surface)
955 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
956 surface, &vfb, &mode_cmd,
957 is_dmabuf_proxy);
958 else if (bo)
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100959 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
Dave Airlie4cf73122011-12-21 09:50:56 +0000960 &mode_cmd);
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100961 else
962 BUG();
Jakob Bornecrantz5ffdb652010-01-30 03:38:08 +0000963
Jakob Bornecrantze7ac9212011-11-28 13:19:12 +0100964err_out:
 965 /* vmw_user_lookup_handle takes one ref; so does new_fb */
966 if (bo)
967 vmw_dmabuf_unreference(&bo);
968 if (surface)
969 vmw_surface_unreference(&surface);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000970
971 if (ret) {
972 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200973 ttm_base_object_unref(&user_obj);
Chris Wilsoncce13ff2010-08-08 13:36:38 +0100974 return ERR_PTR(ret);
Thomas Hellstrom90ff18b2011-10-04 20:13:32 +0200975 } else
976 vfb->user_obj = user_obj;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000977
978 return &vfb->base;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000979}
980
Laurent Pincharte6ecefa2012-05-17 13:27:23 +0200981static const struct drm_mode_config_funcs vmw_kms_funcs = {
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000982 .fb_create = vmw_kms_fb_create,
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000983};
984
Sinclair Yehc8261a92015-06-26 01:23:42 -0700985int vmw_kms_generic_present(struct vmw_private *dev_priv,
Jakob Bornecrantz2fcd5a72011-10-04 20:13:26 +0200986 struct drm_file *file_priv,
987 struct vmw_framebuffer *vfb,
988 struct vmw_surface *surface,
989 uint32_t sid,
990 int32_t destX, int32_t destY,
991 struct drm_vmw_rect *clips,
992 uint32_t num_clips)
993{
Thomas Hellstrom10b1e0c2015-06-26 02:14:27 -0700994 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
995 &surface->res, destX, destY,
996 num_clips, 1, NULL);
Jakob Bornecrantz2fcd5a72011-10-04 20:13:26 +0200997}
998
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -0700999
Sinclair Yehc8261a92015-06-26 01:23:42 -07001000int vmw_kms_present(struct vmw_private *dev_priv,
1001 struct drm_file *file_priv,
1002 struct vmw_framebuffer *vfb,
1003 struct vmw_surface *surface,
1004 uint32_t sid,
1005 int32_t destX, int32_t destY,
1006 struct drm_vmw_rect *clips,
1007 uint32_t num_clips)
1008{
Sinclair Yeh35c05122015-06-26 01:42:06 -07001009 int ret;
1010
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -07001011 switch (dev_priv->active_display_unit) {
1012 case vmw_du_screen_target:
1013 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1014 &surface->res, destX, destY,
1015 num_clips, 1, NULL);
1016 break;
1017 case vmw_du_screen_object:
1018 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1019 sid, destX, destY, clips,
1020 num_clips);
1021 break;
1022 default:
1023 WARN_ONCE(true,
1024 "Present called with invalid display system.\n");
1025 ret = -ENOSYS;
1026 break;
1027 }
Sinclair Yeh35c05122015-06-26 01:42:06 -07001028 if (ret)
1029 return ret;
1030
1031 vmw_fifo_flush(dev_priv, false);
1032
1033 return 0;
Sinclair Yehc8261a92015-06-26 01:23:42 -07001034}
1035
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001036int vmw_kms_init(struct vmw_private *dev_priv)
1037{
1038 struct drm_device *dev = dev_priv->dev;
1039 int ret;
1040
1041 drm_mode_config_init(dev);
1042 dev->mode_config.funcs = &vmw_kms_funcs;
Jakob Bornecrantz3bef3572010-02-09 19:41:57 +00001043 dev->mode_config.min_width = 1;
1044 dev->mode_config.min_height = 1;
Jakob Bornecrantz7e71f8a2010-05-28 11:21:54 +02001045 /* assumed largest fb size */
1046 dev->mode_config.max_width = 8192;
1047 dev->mode_config.max_height = 8192;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001048
Sinclair Yeh35c05122015-06-26 01:42:06 -07001049 ret = vmw_kms_stdu_init_display(dev_priv);
1050 if (ret) {
1051 ret = vmw_kms_sou_init_display(dev_priv);
1052 if (ret) /* Fallback */
1053 ret = vmw_kms_ldu_init_display(dev_priv);
1054 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001055
Sinclair Yehc8261a92015-06-26 01:23:42 -07001056 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001057}
1058
1059int vmw_kms_close(struct vmw_private *dev_priv)
1060{
Sinclair Yehc8261a92015-06-26 01:23:42 -07001061 int ret;
1062
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001063 /*
 1064 * Docs say we should take the lock before calling this function,
 1065 * but since it destroys encoders and our destructor calls
 1066 * drm_encoder_cleanup, which takes the lock, we would deadlock.
1067 */
1068 drm_mode_config_cleanup(dev_priv->dev);
Sinclair Yehc8261a92015-06-26 01:23:42 -07001069 if (dev_priv->active_display_unit == vmw_du_screen_object)
1070 ret = vmw_kms_sou_close_display(dev_priv);
Sinclair Yeh35c05122015-06-26 01:42:06 -07001071 else if (dev_priv->active_display_unit == vmw_du_screen_target)
1072 ret = vmw_kms_stdu_close_display(dev_priv);
Jakob Bornecrantzc0d18312011-11-09 10:25:26 +01001073 else
Sinclair Yehc8261a92015-06-26 01:23:42 -07001074 ret = vmw_kms_ldu_close_display(dev_priv);
1075
1076 return ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001077}
1078
1079int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1080 struct drm_file *file_priv)
1081{
1082 struct drm_vmw_cursor_bypass_arg *arg = data;
1083 struct vmw_display_unit *du;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001084 struct drm_crtc *crtc;
1085 int ret = 0;
1086
1087
1088 mutex_lock(&dev->mode_config.mutex);
1089 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1090
1091 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1092 du = vmw_crtc_to_du(crtc);
1093 du->hotspot_x = arg->xhot;
1094 du->hotspot_y = arg->yhot;
1095 }
1096
1097 mutex_unlock(&dev->mode_config.mutex);
1098 return 0;
1099 }
1100
Rob Clarka4cd5d62014-07-17 23:30:02 -04001101 crtc = drm_crtc_find(dev, arg->crtc_id);
1102 if (!crtc) {
Ville Syrjälä4ae87ff2013-10-17 13:35:05 +03001103 ret = -ENOENT;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001104 goto out;
1105 }
1106
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001107 du = vmw_crtc_to_du(crtc);
1108
1109 du->hotspot_x = arg->xhot;
1110 du->hotspot_y = arg->yhot;
1111
1112out:
1113 mutex_unlock(&dev->mode_config.mutex);
1114
1115 return ret;
1116}
1117
Michel Dänzer0bef23f2011-08-31 07:42:50 +00001118int vmw_kms_write_svga(struct vmw_private *vmw_priv,
Jakob Bornecrantzd7e19582010-05-28 11:21:59 +02001119 unsigned width, unsigned height, unsigned pitch,
Michel Dänzer6558429b2011-08-31 07:42:49 +00001120 unsigned bpp, unsigned depth)
Jakob Bornecrantzd7e19582010-05-28 11:21:59 +02001121{
1122 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1123 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
1124 else if (vmw_fifo_have_pitchlock(vmw_priv))
1125 iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
1126 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
1127 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
Michel Dänzer6558429b2011-08-31 07:42:49 +00001128 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
Michel Dänzer0bef23f2011-08-31 07:42:50 +00001129
1130 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
1131 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
1132 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
1133 return -EINVAL;
1134 }
1135
1136 return 0;
Jakob Bornecrantzd7e19582010-05-28 11:21:59 +02001137}
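/*
 * Worked example (illustrative): programming a 1280x800 mode at 32 bpp,
 * depth 24, would typically use a pitch of 1280 * 4 = 5120 bytes:
 *
 *	ret = vmw_kms_write_svga(vmw_priv, 1280, 800, 5120, 32, 24);
 *
 * The depth check at the end catches hosts that refuse the bpp/depth
 * combination: SVGA_REG_DEPTH is read back and compared against the
 * requested depth, and -EINVAL is returned on mismatch.
 */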
1138
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001139int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1140{
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001141 struct vmw_vga_topology_state *save;
1142 uint32_t i;
1143
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001144 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
1145 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001146 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
Jakob Bornecrantzd7e19582010-05-28 11:21:59 +02001147 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1148 vmw_priv->vga_pitchlock =
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001149 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
Jakob Bornecrantzd7e19582010-05-28 11:21:59 +02001150 else if (vmw_fifo_have_pitchlock(vmw_priv))
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001151 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
Sinclair Yehc8261a92015-06-26 01:23:42 -07001152 SVGA_FIFO_PITCHLOCK);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001153
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001154 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1155 return 0;
1156
1157 vmw_priv->num_displays = vmw_read(vmw_priv,
1158 SVGA_REG_NUM_GUEST_DISPLAYS);
1159
Thomas Hellstrom029e50b2010-10-05 12:43:08 +02001160 if (vmw_priv->num_displays == 0)
1161 vmw_priv->num_displays = 1;
1162
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001163 for (i = 0; i < vmw_priv->num_displays; ++i) {
1164 save = &vmw_priv->vga_save[i];
1165 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1166 save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
1167 save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
1168 save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
1169 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
1170 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
1171 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +02001172 if (i == 0 && vmw_priv->num_displays == 1 &&
1173 save->width == 0 && save->height == 0) {
1174
1175 /*
1176 * It should be fairly safe to assume that these
1177 * values are uninitialized.
1178 */
1179
1180 save->width = vmw_priv->vga_width - save->pos_x;
1181 save->height = vmw_priv->vga_height - save->pos_y;
1182 }
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001183 }
Thomas Hellstrom30c78bb2010-10-01 10:21:48 +02001184
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001185 return 0;
1186}
1187
1188int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
1189{
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001190 struct vmw_vga_topology_state *save;
1191 uint32_t i;
1192
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001193 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
1194 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001195 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
Jakob Bornecrantzd7e19582010-05-28 11:21:59 +02001196 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1197 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
1198 vmw_priv->vga_pitchlock);
1199 else if (vmw_fifo_have_pitchlock(vmw_priv))
1200 iowrite32(vmw_priv->vga_pitchlock,
1201 vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001202
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001203 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1204 return 0;
1205
1206 for (i = 0; i < vmw_priv->num_displays; ++i) {
1207 save = &vmw_priv->vga_save[i];
1208 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1209 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
1210 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
1211 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
1212 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
1213 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
1214 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1215 }
1216
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001217 return 0;
1218}
Jakob Bornecrantzd8bd19d2010-06-01 11:54:20 +02001219
Thomas Hellstrome133e732010-10-05 12:43:04 +02001220bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1221 uint32_t pitch,
1222 uint32_t height)
1223{
Sinclair Yeh35c05122015-06-26 01:42:06 -07001224 return ((u64) pitch * (u64) height) < (u64)
1225 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
1226 dev_priv->prim_bb_mem : dev_priv->vram_size);
Thomas Hellstrome133e732010-10-05 12:43:04 +02001227}
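/*
 * Worked example (illustrative): a 1920x1080 mode at 32 bpp has a pitch of
 * 1920 * 4 = 7680 bytes, so vmw_kms_validate_mode_vram() checks that
 * 7680 * 1080 = 8294400 bytes (~7.9 MiB) is strictly less than
 * dev_priv->prim_bb_mem (screen targets) or dev_priv->vram_size (all other
 * display units).
 */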
1228
Jakob Bornecrantz1c482ab2011-10-17 11:59:45 +02001229
1230/**
 1231 * Function called by DRM code with vbl_lock held.
1232 */
Thomas Hellstrom7a1c2f62010-10-01 10:21:49 +02001233u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
1234{
1235 return 0;
1236}
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001237
Jakob Bornecrantz1c482ab2011-10-17 11:59:45 +02001238/**
 1239 * Function called by DRM code with vbl_lock held.
1240 */
1241int vmw_enable_vblank(struct drm_device *dev, int crtc)
1242{
1243 return -ENOSYS;
1244}
1245
1246/**
 1247 * Function called by DRM code with vbl_lock held.
1248 */
1249void vmw_disable_vblank(struct drm_device *dev, int crtc)
1250{
1251}
1252
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001253
1254/*
1255 * Small shared kms functions.
1256 */
1257
Rashika Kheria847c5962014-01-06 22:18:10 +05301258static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001259 struct drm_vmw_rect *rects)
1260{
1261 struct drm_device *dev = dev_priv->dev;
1262 struct vmw_display_unit *du;
1263 struct drm_connector *con;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001264
1265 mutex_lock(&dev->mode_config.mutex);
1266
1267#if 0
Thomas Hellstrom6ea77d12011-10-04 20:13:36 +02001268 {
1269 unsigned int i;
1270
1271 DRM_INFO("%s: new layout ", __func__);
1272 for (i = 0; i < num; i++)
1273 DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
1274 rects[i].w, rects[i].h);
1275 DRM_INFO("\n");
1276 }
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001277#endif
1278
1279 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
1280 du = vmw_connector_to_du(con);
1281 if (num > du->unit) {
1282 du->pref_width = rects[du->unit].w;
1283 du->pref_height = rects[du->unit].h;
1284 du->pref_active = true;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001285 du->gui_x = rects[du->unit].x;
1286 du->gui_y = rects[du->unit].y;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001287 } else {
1288 du->pref_width = 800;
1289 du->pref_height = 600;
1290 du->pref_active = false;
1291 }
1292 con->status = vmw_du_connector_detect(con, true);
1293 }
1294
1295 mutex_unlock(&dev->mode_config.mutex);
1296
1297 return 0;
1298}
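/*
 * Example (illustrative): with num == 2 and rects describing a 1920x1080
 * region at (0, 0) plus a 1280x1024 region at (1920, 0), the connector for
 * unit 0 gets a 1920x1080 preference at gui position (0, 0), unit 1 gets
 * 1280x1024 at (1920, 0), and any remaining units fall back to an inactive
 * 800x600 preference.
 */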
1299
1300void vmw_du_crtc_save(struct drm_crtc *crtc)
1301{
1302}
1303
1304void vmw_du_crtc_restore(struct drm_crtc *crtc)
1305{
1306}
1307
1308void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1309 u16 *r, u16 *g, u16 *b,
1310 uint32_t start, uint32_t size)
1311{
1312 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1313 int i;
1314
1315 for (i = 0; i < size; i++) {
1316 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
1317 r[i], g[i], b[i]);
1318 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
1319 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
1320 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
1321 }
1322}
1323
1324void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
1325{
1326}
1327
1328void vmw_du_connector_save(struct drm_connector *connector)
1329{
1330}
1331
1332void vmw_du_connector_restore(struct drm_connector *connector)
1333{
1334}
1335
1336enum drm_connector_status
1337vmw_du_connector_detect(struct drm_connector *connector, bool force)
1338{
1339 uint32_t num_displays;
1340 struct drm_device *dev = connector->dev;
1341 struct vmw_private *dev_priv = vmw_priv(dev);
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001342 struct vmw_display_unit *du = vmw_connector_to_du(connector);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001343
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001344 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001345
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001346 return ((vmw_connector_to_du(connector)->unit < num_displays &&
1347 du->pref_active) ?
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001348 connector_status_connected : connector_status_disconnected);
1349}
1350
1351static struct drm_display_mode vmw_kms_connector_builtin[] = {
1352 /* 640x480@60Hz */
1353 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
1354 752, 800, 0, 480, 489, 492, 525, 0,
1355 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
1356 /* 800x600@60Hz */
1357 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
1358 968, 1056, 0, 600, 601, 605, 628, 0,
1359 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1360 /* 1024x768@60Hz */
1361 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1362 1184, 1344, 0, 768, 771, 777, 806, 0,
1363 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
1364 /* 1152x864@75Hz */
1365 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1366 1344, 1600, 0, 864, 865, 868, 900, 0,
1367 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1368 /* 1280x768@60Hz */
1369 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1370 1472, 1664, 0, 768, 771, 778, 798, 0,
1371 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1372 /* 1280x800@60Hz */
1373 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1374 1480, 1680, 0, 800, 803, 809, 831, 0,
1375 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
1376 /* 1280x960@60Hz */
1377 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1378 1488, 1800, 0, 960, 961, 964, 1000, 0,
1379 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1380 /* 1280x1024@60Hz */
1381 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1382 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
1383 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1384 /* 1360x768@60Hz */
1385 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1386 1536, 1792, 0, 768, 771, 777, 795, 0,
1387 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 1388	/* 1400x1050@60Hz */
1389 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1390 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
1391 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1392 /* 1440x900@60Hz */
1393 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1394 1672, 1904, 0, 900, 903, 909, 934, 0,
1395 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1396 /* 1600x1200@60Hz */
1397 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1398 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
1399 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1400 /* 1680x1050@60Hz */
1401 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1402 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
1403 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1404 /* 1792x1344@60Hz */
1405 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
1406 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
1407 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 1408	/* 1856x1392@60Hz */
1409 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
1410 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
1411 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1412 /* 1920x1200@60Hz */
1413 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
1414 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
1415 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1416 /* 1920x1440@60Hz */
1417 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
1418 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
1419 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1420 /* 2560x1600@60Hz */
1421 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
1422 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
1423 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1424 /* Terminate */
1425 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
1426};
1427
Thomas Hellstrom1543b4d2011-11-02 09:43:10 +01001428/**
1429 * vmw_guess_mode_timing - Provide fake timings for a
1430 * 60Hz vrefresh mode.
1431 *
 1432 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
1433 * members filled in.
1434 */
1435static void vmw_guess_mode_timing(struct drm_display_mode *mode)
1436{
1437 mode->hsync_start = mode->hdisplay + 50;
1438 mode->hsync_end = mode->hsync_start + 50;
1439 mode->htotal = mode->hsync_end + 50;
1440
1441 mode->vsync_start = mode->vdisplay + 50;
1442 mode->vsync_end = mode->vsync_start + 50;
1443 mode->vtotal = mode->vsync_end + 50;
1444
1445 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
1446 mode->vrefresh = drm_mode_vrefresh(mode);
1447}
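/*
 * Worked example for the timing guess above (editorial sketch, not driver
 * code): for a 1280x800 preferred mode, hdisplay + 150 gives htotal = 1430
 * and vdisplay + 150 gives vtotal = 950.  The clock then becomes
 * 1430 * 950 / 100 * 6 = 81510 kHz, and drm_mode_vrefresh() turns that back
 * into roughly 81510000 / (1430 * 950) ~= 60 Hz, so the fake timings are
 * self-consistent with the intended 60 Hz refresh rate.
 */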
1448
1449
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001450int vmw_du_connector_fill_modes(struct drm_connector *connector,
1451 uint32_t max_width, uint32_t max_height)
1452{
1453 struct vmw_display_unit *du = vmw_connector_to_du(connector);
1454 struct drm_device *dev = connector->dev;
1455 struct vmw_private *dev_priv = vmw_priv(dev);
1456 struct drm_display_mode *mode = NULL;
1457 struct drm_display_mode *bmode;
1458 struct drm_display_mode prefmode = { DRM_MODE("preferred",
1459 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
1460 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1461 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
1462 };
1463 int i;
Sinclair Yeh9a723842014-10-31 09:58:06 +01001464 u32 assumed_bpp = 2;
1465
1466 /*
 1467	 * If using screen objects, then assume 32-bpp because that's what the
 1468	 * SVGA device assumes.
1469 */
Sinclair Yehc8261a92015-06-26 01:23:42 -07001470 if (dev_priv->active_display_unit == vmw_du_screen_object)
Sinclair Yeh9a723842014-10-31 09:58:06 +01001471 assumed_bpp = 4;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001472
Sinclair Yeh35c05122015-06-26 01:42:06 -07001473 if (dev_priv->active_display_unit == vmw_du_screen_target) {
1474 max_width = min(max_width, dev_priv->stdu_max_width);
1475 max_height = min(max_height, dev_priv->stdu_max_height);
1476 }
1477
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001478 /* Add preferred mode */
Sinclair Yehc8261a92015-06-26 01:23:42 -07001479 mode = drm_mode_duplicate(dev, &prefmode);
1480 if (!mode)
1481 return 0;
1482 mode->hdisplay = du->pref_width;
1483 mode->vdisplay = du->pref_height;
1484 vmw_guess_mode_timing(mode);
Jakob Bornecrantz55bde5b2011-11-03 21:03:05 +01001485
Sinclair Yehc8261a92015-06-26 01:23:42 -07001486 if (vmw_kms_validate_mode_vram(dev_priv,
1487 mode->hdisplay * assumed_bpp,
1488 mode->vdisplay)) {
1489 drm_mode_probed_add(connector, mode);
1490 } else {
1491 drm_mode_destroy(dev, mode);
1492 mode = NULL;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001493 }
1494
Sinclair Yehc8261a92015-06-26 01:23:42 -07001495 if (du->pref_mode) {
1496 list_del_init(&du->pref_mode->head);
1497 drm_mode_destroy(dev, du->pref_mode);
1498 }
1499
 1500	/* mode may be NULL here; this is intentional */
1501 du->pref_mode = mode;
1502
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001503 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
1504 bmode = &vmw_kms_connector_builtin[i];
1505 if (bmode->hdisplay > max_width ||
1506 bmode->vdisplay > max_height)
1507 continue;
1508
Sinclair Yeh9a723842014-10-31 09:58:06 +01001509 if (!vmw_kms_validate_mode_vram(dev_priv,
1510 bmode->hdisplay * assumed_bpp,
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001511 bmode->vdisplay))
1512 continue;
1513
1514 mode = drm_mode_duplicate(dev, bmode);
1515 if (!mode)
1516 return 0;
1517 mode->vrefresh = drm_mode_vrefresh(mode);
1518
1519 drm_mode_probed_add(connector, mode);
1520 }
1521
Jakob Bornecrantzd41025c2011-11-03 21:03:07 +01001522	/* Move the preferred mode first, to help apps pick the right mode. */
1523 if (du->pref_mode)
1524 list_move(&du->pref_mode->head, &connector->probed_modes);
1525
Dave Airlieb87577b2014-05-01 09:26:53 +10001526 drm_mode_connector_list_update(connector, true);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001527
1528 return 1;
1529}
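/*
 * Editorial note on the mode loop above (not driver code): assuming
 * vmw_kms_validate_mode_vram() roughly checks that pitch * height fits in
 * the available VRAM, a builtin mode such as 2560x1600 with assumed_bpp = 4
 * is only kept if about 2560 * 4 * 1600 bytes (~16 MB) of VRAM are
 * available, whereas with assumed_bpp = 2 the same mode needs about half
 * of that.
 */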
1530
1531int vmw_du_connector_set_property(struct drm_connector *connector,
1532 struct drm_property *property,
1533 uint64_t val)
1534{
1535 return 0;
1536}
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001537
1538
1539int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1540 struct drm_file *file_priv)
1541{
1542 struct vmw_private *dev_priv = vmw_priv(dev);
1543 struct drm_vmw_update_layout_arg *arg =
1544 (struct drm_vmw_update_layout_arg *)data;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001545 void __user *user_rects;
1546 struct drm_vmw_rect *rects;
1547 unsigned rects_size;
1548 int ret;
1549 int i;
1550 struct drm_mode_config *mode_config = &dev->mode_config;
Sinclair Yehc8261a92015-06-26 01:23:42 -07001551 struct drm_vmw_rect bounding_box = {0};
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001552
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001553 if (!arg->num_outputs) {
1554 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
1555 vmw_du_update_layout(dev_priv, 1, &def_rect);
Thomas Hellstrom5151adb2015-03-09 01:56:21 -07001556 return 0;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001557 }
1558
1559 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
Xi Wangbab9efc2011-11-28 12:25:43 +01001560 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
1561 GFP_KERNEL);
Thomas Hellstrom5151adb2015-03-09 01:56:21 -07001562 if (unlikely(!rects))
1563 return -ENOMEM;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001564
1565 user_rects = (void __user *)(unsigned long)arg->rects;
1566 ret = copy_from_user(rects, user_rects, rects_size);
1567 if (unlikely(ret != 0)) {
1568 DRM_ERROR("Failed to get rects.\n");
1569 ret = -EFAULT;
1570 goto out_free;
1571 }
1572
1573 for (i = 0; i < arg->num_outputs; ++i) {
Xi Wangbab9efc2011-11-28 12:25:43 +01001574 if (rects[i].x < 0 ||
1575 rects[i].y < 0 ||
1576 rects[i].x + rects[i].w > mode_config->max_width ||
1577 rects[i].y + rects[i].h > mode_config->max_height) {
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001578 DRM_ERROR("Invalid GUI layout.\n");
1579 ret = -EINVAL;
1580 goto out_free;
1581 }
Sinclair Yehc8261a92015-06-26 01:23:42 -07001582
1583 /*
 1584		 * bounding_box.w and bounding_box.h are used as
 1585		 * lower-right coordinates.
1586 */
1587 if (rects[i].x + rects[i].w > bounding_box.w)
1588 bounding_box.w = rects[i].x + rects[i].w;
1589
1590 if (rects[i].y + rects[i].h > bounding_box.h)
1591 bounding_box.h = rects[i].y + rects[i].h;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001592 }
1593
Sinclair Yeh35c05122015-06-26 01:42:06 -07001594 /*
1595 * For Screen Target Display Unit, all the displays must fit
 1596	 * inside the maximum texture size.
1597 */
1598 if (dev_priv->active_display_unit == vmw_du_screen_target)
1599 if (bounding_box.w > dev_priv->texture_max_width ||
1600 bounding_box.h > dev_priv->texture_max_height) {
1601 DRM_ERROR("Layout exceeds maximum texture size\n");
1602 ret = -EINVAL;
1603 goto out_free;
1604 }
1605
1606
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001607 vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
1608
1609out_free:
1610 kfree(rects);
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001611 return ret;
1612}
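/*
 * Layout sketch for the ioctl above (editorial, not driver code): with two
 * outputs placed side by side, e.g. rects[0] = {0, 0, 1920, 1080} and
 * rects[1] = {1920, 0, 1920, 1080} (x, y, w, h), the loop computes
 * bounding_box.w = 1920 + 1920 = 3840 and bounding_box.h = 1080.  On a
 * Screen Target device the layout is then rejected with -EINVAL if 3840
 * exceeds texture_max_width or 1080 exceeds texture_max_height.
 */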
Thomas Hellstrom1a4b1722015-06-26 02:03:53 -07001613
1614/**
1615 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
1616 * on a set of cliprects and a set of display units.
1617 *
1618 * @dev_priv: Pointer to a device private structure.
1619 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 1620 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
1621 * Cliprects are given in framebuffer coordinates.
1622 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
1623 * be NULL. Cliprects are given in source coordinates.
1624 * @dest_x: X coordinate offset for the crtc / destination clip rects.
1625 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
1626 * @num_clips: Number of cliprects in the @clips or @vclips array.
1627 * @increment: Integer with which to increment the clip counter when looping.
1628 * Used to skip a predetermined number of clip rects.
1629 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
1630 */
1631int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1632 struct vmw_framebuffer *framebuffer,
1633 const struct drm_clip_rect *clips,
1634 const struct drm_vmw_rect *vclips,
1635 s32 dest_x, s32 dest_y,
1636 int num_clips,
1637 int increment,
1638 struct vmw_kms_dirty *dirty)
1639{
1640 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
1641 struct drm_crtc *crtc;
1642 u32 num_units = 0;
1643 u32 i, k;
1645
1646 dirty->dev_priv = dev_priv;
1647
1648 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
1649 if (crtc->primary->fb != &framebuffer->base)
1650 continue;
1651 units[num_units++] = vmw_crtc_to_du(crtc);
1652 }
1653
1654 for (k = 0; k < num_units; k++) {
1655 struct vmw_display_unit *unit = units[k];
1656 s32 crtc_x = unit->crtc.x;
1657 s32 crtc_y = unit->crtc.y;
1658 s32 crtc_width = unit->crtc.mode.hdisplay;
1659 s32 crtc_height = unit->crtc.mode.vdisplay;
1660 const struct drm_clip_rect *clips_ptr = clips;
1661 const struct drm_vmw_rect *vclips_ptr = vclips;
1662
1663 dirty->unit = unit;
1664 if (dirty->fifo_reserve_size > 0) {
1665 dirty->cmd = vmw_fifo_reserve(dev_priv,
1666 dirty->fifo_reserve_size);
1667 if (!dirty->cmd) {
1668 DRM_ERROR("Couldn't reserve fifo space "
1669 "for dirty blits.\n");
 1670				return -ENOMEM;
1671 }
1672 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
1673 }
1674 dirty->num_hits = 0;
1675 for (i = 0; i < num_clips; i++, clips_ptr += increment,
1676 vclips_ptr += increment) {
1677 s32 clip_left;
1678 s32 clip_top;
1679
1680 /*
1681 * Select clip array type. Note that integer type
1682 * in @clips is unsigned short, whereas in @vclips
1683 * it's 32-bit.
1684 */
1685 if (clips) {
1686 dirty->fb_x = (s32) clips_ptr->x1;
1687 dirty->fb_y = (s32) clips_ptr->y1;
1688 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
1689 crtc_x;
1690 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
1691 crtc_y;
1692 } else {
1693 dirty->fb_x = vclips_ptr->x;
1694 dirty->fb_y = vclips_ptr->y;
1695 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
1696 dest_x - crtc_x;
1697 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
1698 dest_y - crtc_y;
1699 }
1700
1701 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
1702 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
1703
1704 /* Skip this clip if it's outside the crtc region */
1705 if (dirty->unit_x1 >= crtc_width ||
1706 dirty->unit_y1 >= crtc_height ||
1707 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
1708 continue;
1709
1710 /* Clip right and bottom to crtc limits */
1711 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
1712 crtc_width);
1713 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
1714 crtc_height);
1715
1716 /* Clip left and top to crtc limits */
1717 clip_left = min_t(s32, dirty->unit_x1, 0);
1718 clip_top = min_t(s32, dirty->unit_y1, 0);
1719 dirty->unit_x1 -= clip_left;
1720 dirty->unit_y1 -= clip_top;
1721 dirty->fb_x -= clip_left;
1722 dirty->fb_y -= clip_top;
1723
1724 dirty->clip(dirty);
1725 }
1726
1727 dirty->fifo_commit(dirty);
1728 }
1729
1730 return 0;
1731}
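/*
 * Caller sketch for the helper above (editorial; my_clip(), my_commit() and
 * struct my_blit_cmd are hypothetical names, not part of the driver): a
 * display-unit implementation typically fills a struct vmw_kms_dirty with a
 * worst-case FIFO reservation and two callbacks, then lets the helper do
 * the per-unit clipping:
 *
 *	struct vmw_kms_dirty dirty;
 *
 *	memset(&dirty, 0, sizeof(dirty));
 *	dirty.clip = my_clip;
 *	dirty.fifo_commit = my_commit;
 *	dirty.fifo_reserve_size = sizeof(struct my_blit_cmd) * num_clips;
 *	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
 *				   0, 0, num_clips, 1, &dirty);
 *
 * my_clip() is called once per visible clip, reads the unit_x1/y1/x2/y2 and
 * fb_x/fb_y members and usually bumps num_hits; my_commit() is called once
 * per display unit and commits the commands built into dirty.cmd.
 */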
1732
1733/**
1734 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
1735 * command submission.
1736 *
 1737 * @dev_priv: Pointer to a device private structure.
1738 * @buf: The buffer object
1739 * @interruptible: Whether to perform waits as interruptible.
1740 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 1741 * the buffer will be validated as a GMR. Already pinned buffers will not be
1742 * validated.
1743 *
1744 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
1745 * interrupted by a signal.
1746 */
1747int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
1748 struct vmw_dma_buffer *buf,
1749 bool interruptible,
1750 bool validate_as_mob)
1751{
1752 struct ttm_buffer_object *bo = &buf->base;
1753 int ret;
1754
1755 ttm_bo_reserve(bo, false, false, interruptible, 0);
1756 ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
1757 validate_as_mob);
1758 if (ret)
1759 ttm_bo_unreserve(bo);
1760
1761 return ret;
1762}
1763
1764/**
1765 * vmw_kms_helper_buffer_revert - Undo the actions of
1766 * vmw_kms_helper_buffer_prepare.
1767 *
 1768 * @buf: Pointer to the buffer object.
1769 *
1770 * Helper to be used if an error forces the caller to undo the actions of
1771 * vmw_kms_helper_buffer_prepare.
1772 */
1773void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
1774{
1775 if (buf)
1776 ttm_bo_unreserve(&buf->base);
1777}
1778
1779/**
1780 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
1781 * kms command submission.
1782 *
1783 * @dev_priv: Pointer to a device private structure.
1784 * @file_priv: Pointer to a struct drm_file representing the caller's
1785 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
1786 * if non-NULL, @user_fence_rep must be non-NULL.
1787 * @buf: The buffer object.
1788 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
1789 * ref-counted fence pointer is returned here.
1790 * @user_fence_rep: Optional pointer to a user-space provided struct
1791 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
1792 * function copies fence data to user-space in a fail-safe manner.
1793 */
1794void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
1795 struct drm_file *file_priv,
1796 struct vmw_dma_buffer *buf,
1797 struct vmw_fence_obj **out_fence,
1798 struct drm_vmw_fence_rep __user *
1799 user_fence_rep)
1800{
1801 struct vmw_fence_obj *fence;
1802 uint32_t handle;
1803 int ret;
1804
1805 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
1806 file_priv ? &handle : NULL);
1807 if (buf)
1808 vmw_fence_single_bo(&buf->base, fence);
1809 if (file_priv)
1810 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
1811 ret, user_fence_rep, fence,
1812 handle);
1813 if (out_fence)
1814 *out_fence = fence;
1815 else
1816 vmw_fence_obj_unreference(&fence);
1817
1818 vmw_kms_helper_buffer_revert(buf);
1819}
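/*
 * Usage sketch for the buffer helpers above (editorial, not driver code;
 * emit_commands() is a placeholder for the caller's actual command
 * submission).  A typical KMS path pins down the backing buffer, emits its
 * commands, and then fences and unreserves in one go:
 *
 *	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
 *	if (ret)
 *		return ret;
 *
 *	ret = emit_commands(dev_priv, buf);
 *	if (ret) {
 *		vmw_kms_helper_buffer_revert(buf);
 *		return ret;
 *	}
 *
 *	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
 */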
1820
1821
1822/**
1823 * vmw_kms_helper_resource_revert - Undo the actions of
1824 * vmw_kms_helper_resource_prepare.
1825 *
1826 * @res: Pointer to the resource. Typically a surface.
1827 *
1828 * Helper to be used if an error forces the caller to undo the actions of
1829 * vmw_kms_helper_resource_prepare.
1830 */
1831void vmw_kms_helper_resource_revert(struct vmw_resource *res)
1832{
1833 vmw_kms_helper_buffer_revert(res->backup);
1834 vmw_resource_unreserve(res, NULL, 0);
1835 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1836}
1837
1838/**
1839 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
1840 * command submission.
1841 *
1842 * @res: Pointer to the resource. Typically a surface.
1843 * @interruptible: Whether to perform waits as interruptible.
1844 *
 1845 * Also reserves and validates the backup buffer if the resource is
 1846 * guest-backed. Returns 0 on success, negative error code on failure, or
 1847 * -ERESTARTSYS if interrupted by a signal.
1848 */
1849int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
1850 bool interruptible)
1851{
1852 int ret = 0;
1853
1854 if (interruptible)
1855 ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
1856 else
1857 mutex_lock(&res->dev_priv->cmdbuf_mutex);
1858
1859 if (unlikely(ret != 0))
1860 return -ERESTARTSYS;
1861
1862 ret = vmw_resource_reserve(res, interruptible, false);
1863 if (ret)
1864 goto out_unlock;
1865
1866 if (res->backup) {
1867 ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
1868 interruptible,
1869 res->dev_priv->has_mob);
1870 if (ret)
1871 goto out_unreserve;
1872 }
1873 ret = vmw_resource_validate(res);
1874 if (ret)
1875 goto out_revert;
1876 return 0;
1877
1878out_revert:
1879 vmw_kms_helper_buffer_revert(res->backup);
1880out_unreserve:
1881 vmw_resource_unreserve(res, NULL, 0);
1882out_unlock:
1883 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1884 return ret;
1885}
1886
1887/**
1888 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
1889 * kms command submission.
1890 *
1891 * @res: Pointer to the resource. Typically a surface.
1892 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
1893 * ref-counted fence pointer is returned here.
1894 */
1895void vmw_kms_helper_resource_finish(struct vmw_resource *res,
1896 struct vmw_fence_obj **out_fence)
1897{
1898 if (res->backup || out_fence)
1899 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
1900 out_fence, NULL);
1901
1902 vmw_resource_unreserve(res, NULL, 0);
1903 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1904}
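/*
 * Usage sketch for the resource helpers above (editorial, not driver code;
 * emit_surface_commands() is a placeholder for the caller's actual command
 * submission).  Surface-backed paths follow the same prepare / submit /
 * finish pattern, with the cmdbuf mutex held between prepare and finish
 * (or revert):
 *
 *	ret = vmw_kms_helper_resource_prepare(srf_res, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = emit_surface_commands(dev_priv, srf_res);
 *	if (ret) {
 *		vmw_kms_helper_resource_revert(srf_res);
 *		return ret;
 *	}
 *
 *	vmw_kms_helper_resource_finish(srf_res, NULL);
 */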
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -07001905
1906/**
1907 * vmw_kms_update_proxy - Helper function to update a proxy surface from
1908 * its backing MOB.
1909 *
1910 * @res: Pointer to the surface resource
1911 * @clips: Clip rects in framebuffer (surface) space.
1912 * @num_clips: Number of clips in @clips.
1913 * @increment: Integer with which to increment the clip counter when looping.
1914 * Used to skip a predetermined number of clip rects.
1915 *
1916 * This function makes sure the proxy surface is updated from its backing MOB
1917 * using the region given by @clips. The surface resource @res and its backing
 1918 * MOB need to be reserved and validated on call.
1919 */
1920int vmw_kms_update_proxy(struct vmw_resource *res,
1921 const struct drm_clip_rect *clips,
1922 unsigned num_clips,
1923 int increment)
1924{
1925 struct vmw_private *dev_priv = res->dev_priv;
1926 struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
1927 struct {
1928 SVGA3dCmdHeader header;
1929 SVGA3dCmdUpdateGBImage body;
1930 } *cmd;
1931 SVGA3dBox *box;
1932 size_t copy_size = 0;
1933 int i;
1934
1935 if (!clips)
1936 return 0;
1937
1938 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
1939 if (!cmd) {
1940 DRM_ERROR("Couldn't reserve fifo space for proxy surface "
1941 "update.\n");
1942 return -ENOMEM;
1943 }
1944
1945 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
1946 box = &cmd->body.box;
1947
1948 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
1949 cmd->header.size = sizeof(cmd->body);
1950 cmd->body.image.sid = res->id;
1951 cmd->body.image.face = 0;
1952 cmd->body.image.mipmap = 0;
1953
1954 if (clips->x1 > size->width || clips->x2 > size->width ||
1955 clips->y1 > size->height || clips->y2 > size->height) {
 1956			DRM_ERROR("Invalid clips outside of framebuffer.\n");
1957 return -EINVAL;
1958 }
1959
1960 box->x = clips->x1;
1961 box->y = clips->y1;
1962 box->z = 0;
1963 box->w = clips->x2 - clips->x1;
1964 box->h = clips->y2 - clips->y1;
1965 box->d = 1;
1966
1967 copy_size += sizeof(*cmd);
1968 }
1969
1970 vmw_fifo_commit(dev_priv, copy_size);
1971
1972 return 0;
1973}