/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);

void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

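/*
 * Define the alpha cursor image by reserving FIFO space for a
 * SVGA_CMD_DEFINE_ALPHA_CURSOR command followed by the raw ARGB image
 * data, which is copied in directly behind the command header.
 */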
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}

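/*
 * Show/hide and move the cursor by writing the cursor registers in the
 * FIFO, then bumping SVGA_FIFO_CURSOR_COUNT so the device picks up the
 * change.
 */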
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

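/*
 * Set the cursor for a CRTC from either a cursor-capable surface (one
 * with a snooper image) or a dma buffer holding a 64x64 ARGB image.
 * A handle of zero clears the cursor.
 */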
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* Take down the old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* Set up the new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}

int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}

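/*
 * Snoop a cursor surface DMA upload: if the command describes a full
 * 64x64 upload at a page-aligned offset, copy the guest image into the
 * surface's snooper buffer and bump its age so it can be flushed to the
 * device after execbuf.
 */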
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64   || box->h != 64   || box->d != 1    ||
	    box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy (size != 64) */
		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* we can't call vmw_cursor_update_image from here, since execbuf
	 * has reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

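/*
 * After command submission, push any snooped cursor images whose age
 * has changed out to the device for the CRTCs that use them.
 */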
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct vmw_dma_buffer *buffer;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
};

void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfb =
		vmw_framebuffer_to_vfbs(framebuffer);

	cancel_delayed_work_sync(&vfb->d_work);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfb->surface);

	kfree(framebuffer);
}

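/*
 * Delayed-work handler for full-screen presents: if present_fs is set,
 * emit an SVGA_3D_CMD_PRESENT covering the whole framebuffer, then
 * re-arm the work so a failed FIFO reservation is retried later.
 */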
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/*
	 * Will not re-add if already pending.
	 */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}

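/*
 * Dirty callback for surface-backed framebuffers.  Without Screen
 * Object support (or with no clip rects) the update is deferred to the
 * full-screen present worker; otherwise an SVGA_3D_CMD_PRESENT is
 * emitted with one copy rect per clip rect.
 */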
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		int ret;

		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/*
			 * No work was pending. Force an immediate present.
			 */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

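/*
 * Wrap a vmw_surface in a new vmw_framebuffer after checking that the
 * surface's dimensions and format are compatible with the requested
 * mode, taking an extra reference to the surface on success.
 */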
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/*
	 * Sanity checks.
	 */

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->sizes[0].width < mode_cmd->width ||
		     surface->sizes[0].height < mode_cmd->height ||
		     surface->sizes[0].depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	if (unlikely(format != surface->format)) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitch = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->base.pin = &vmw_surface_dmabuf_pin;
	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
	vfbs->surface = surface;
	mutex_init(&vfbs->work_lock);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}

int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct drm_clip_rect norect;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

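/*
 * Pin callback for surface-backed framebuffers: allocate a VRAM buffer
 * large enough for the scanout (pitch * height) while overlays are
 * paused.  The matching unpin below releases that buffer again.
 */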
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);
	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
	int ret;

	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
	if (unlikely(vfbs->buffer == NULL))
		return -ENOMEM;

	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);

	return ret;
}

static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct ttm_buffer_object *bo;
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);

	bo = &vfbs->buffer->base;
	ttm_bo_unref(&bo);
	vfbs->buffer = NULL;

	return 0;
}

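/*
 * Pin callback for dmabuf-backed framebuffers: move the backing buffer
 * to the start of VRAM (with overlays paused) so it can be scanned out;
 * unpin moves it back out of VRAM.
 */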
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}

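/*
 * Wrap a vmw_dma_buffer in a new vmw_framebuffer after checking that
 * the buffer is large enough for the requested mode, taking an extra
 * reference to the buffer on success.
 */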
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
		goto out_err3;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitch = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	unsigned int required_size;
	int ret;

	/*
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	required_size = mode_cmd->pitch * mode_cmd->height;
	if (unlikely(required_size > dev_priv->vram_size)) {
		DRM_ERROR("VRAM size is too small for requested mode.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * End conditioned code.
	 */

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
					     mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	if (!surface->scanout)
		goto err_not_scanout;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
					      mode_cmd);

	/* vmw_user_surface_lookup takes one ref, and so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return ERR_PTR(-ENOENT);
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd);

	/* vmw_user_dmabuf_lookup takes one ref, and so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;

err_not_scanout:
	DRM_ERROR("surface not marked as scanout\n");
	/* vmw_user_surface_lookup takes one ref */
	vmw_surface_unreference(&surface);

	return ERR_PTR(-EINVAL);
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	ret = vmw_kms_init_legacy_display_system(dev_priv);

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs say we should take the lock before calling this function,
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

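/*
 * Program the basic SVGA mode registers (size, pitch, bpp, depth and
 * colour masks), using the pitchlock register or the FIFO pitchlock
 * when available.
 */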
void vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
}

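/*
 * Save the current SVGA register state (and, when display topology is
 * supported, the per-display layout) so it can be restored later by
 * vmw_kms_restore_vga().
 */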
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						   SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

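/*
 * Ioctl that updates the guest display layout from a user-supplied
 * array of rects; with no outputs given, a single 800x600 layout is
 * assumed.
 */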
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
		goto out_unlock;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kzalloc(rects_size, GFP_KERNEL);
	if (unlikely(!rects)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return 0;
}