/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
27
28#include "vmwgfx_kms.h"
29
Jakob Bornecrantz56d1c782011-10-04 20:13:22 +020030
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +000031/* Might need a hrtimer here? */
32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
33
Thomas Hellstrom22ee8612010-05-28 11:22:00 +020034static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
35static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +000036
37void vmw_display_unit_cleanup(struct vmw_display_unit *du)
38{
39 if (du->cursor_surface)
40 vmw_surface_unreference(&du->cursor_surface);
41 if (du->cursor_dmabuf)
42 vmw_dmabuf_unreference(&du->cursor_dmabuf);
43 drm_crtc_cleanup(&du->crtc);
44 drm_encoder_cleanup(&du->encoder);
45 drm_connector_cleanup(&du->connector);
46}
47
48/*
49 * Display Unit Cursor functions
50 */
51
52int vmw_cursor_update_image(struct vmw_private *dev_priv,
53 u32 *image, u32 width, u32 height,
54 u32 hotspotX, u32 hotspotY)
55{
56 struct {
57 u32 cmd;
58 SVGAFifoCmdDefineAlphaCursor cursor;
59 } *cmd;
60 u32 image_size = width * height * 4;
61 u32 cmd_size = sizeof(*cmd) + image_size;
62
63 if (!image)
64 return -EINVAL;
65
66 cmd = vmw_fifo_reserve(dev_priv, cmd_size);
67 if (unlikely(cmd == NULL)) {
68 DRM_ERROR("Fifo reserve failed.\n");
69 return -ENOMEM;
70 }
71
72 memset(cmd, 0, sizeof(*cmd));
73
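	/* The ARGB image data follows the command struct in the fifo. */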
	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}

void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
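	/* Bump the cursor count so the device notices the new state. */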
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

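		/* The cursor image is assumed to be 64x64 ARGB, 64*64*4 bytes. */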
		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}

int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors must be 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

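	/*
	 * The copy boxes follow the SurfaceDMA body in the command stream;
	 * header.size covers the body plus all boxes.
	 */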
	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64 || box->h != 64 || box->d != 1 ||
	    box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy (size != 64) */
		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* We can't update the cursor image from here, since execbuf has
	 * reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

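	/* Re-emit any cursor image that the snooper updated during execbuf. */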
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct vmw_dma_buffer *buffer;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
	struct list_head head;
	struct drm_master *master;
};

/**
 * vmw_kms_idle_workqueues - Flush workqueues on this master
 *
 * @vmaster: Pointer identifying the master, for the surfaces of which
 * we idle the dirty work queues.
 *
 * This function should be called with the ttm lock held in exclusive mode
 * to idle all dirty work queues before the fifo is taken down.
 *
 * The work task may actually requeue itself, but after the flush returns we're
 * sure that there's nothing to present, since the ttm lock is held in
 * exclusive mode, so the fifo will never get used.
 */

void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
{
	struct vmw_framebuffer_surface *entry;

	mutex_lock(&vmaster->fb_surf_mutex);
	list_for_each_entry(entry, &vmaster->fb_surf, head) {
		if (cancel_delayed_work_sync(&entry->d_work))
			(void) entry->d_work.work.func(&entry->d_work.work);

		(void) cancel_delayed_work_sync(&entry->d_work);
	}
	mutex_unlock(&vmaster->fb_surf_mutex);
}

void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_master *vmaster = vmw_master(vfbs->master);

	mutex_lock(&vmaster->fb_surf_mutex);
	list_del(&vfbs->head);
	mutex_unlock(&vmaster->fb_surf_mutex);

	cancel_delayed_work_sync(&vfbs->d_work);
	drm_master_put(&vfbs->master);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	/*
	 * Strictly we should take the ttm_lock in read mode before accessing
	 * the fifo, to make sure the fifo is present and up. However,
	 * instead we flush all workqueues under the ttm lock in exclusive mode
	 * before taking down the fifo.
	 */
	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/* Will not re-add if already pending. */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}

static int do_surface_dirty_ldu(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct vmw_surface *surf,
				unsigned flags, unsigned color,
				struct drm_clip_rect *clips,
				unsigned num_clips, int inc)
{
	SVGA3dCopyRect *cr;
	int i;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

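	/* The command struct already contains one copy rect; reserve
	 * space for num_clips - 1 more. */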
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) *
			       sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips *
				       sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) *
			sizeof(cmd->cr));

	return 0;
}

static int do_surface_dirty_sou(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct vmw_surface *surf,
				unsigned flags, unsigned color,
				struct drm_clip_rect *clips,
				unsigned num_clips, int inc)
{
	int left = clips->x2, right = clips->x1;
	int top = clips->y2, bottom = clips->y1;
	size_t fifo_size;
	int i;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));

	cmd->body.srcImage.sid = cpu_to_le32(surf->res.id);
	cmd->body.destScreenId = SVGA_ID_INVALID; /* virtual coords */

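	/* Collapse all clip rects into a single bounding box, since the
	 * blit takes only one source and one destination rect. */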
	for (i = 0; i < num_clips; i++, clips += inc) {
		left = min_t(int, left, (int)clips->x1);
		right = max_t(int, right, (int)clips->x2);
		top = min_t(int, top, (int)clips->y1);
		bottom = max_t(int, bottom, (int)clips->y2);
	}

	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;

	cmd->body.destRect.left = left;
	cmd->body.destRect.right = right;
	cmd->body.destRect.top = top;
	cmd->body.destRect.bottom = bottom;

	vmw_fifo_commit(dev_priv, fifo_size);

	return 0;
}

int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	int ret, inc = 1;

	if (unlikely(vfbs->master != file_priv->master))
		return -EINVAL;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	/* Are we using screen objects? */
	if (!dev_priv->sou_priv) {
		int ret;

		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/* No work was pending; force an immediate present. */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		ttm_read_unlock(&vmaster->lock);
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (!dev_priv->sou_priv)
		ret = do_surface_dirty_ldu(dev_priv, &vfbs->base, surf,
					   flags, color,
					   clips, num_clips, inc);
	else
		ret = do_surface_dirty_sou(dev_priv, &vfbs->base, surf,
					   flags, color,
					   clips, num_clips, inc);

	ttm_read_unlock(&vmaster->lock);
	return ret;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct drm_file *file_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Sanity checks.
	 */

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->sizes[0].width < mode_cmd->width ||
		     surface->sizes[0].height < mode_cmd->height ||
		     surface->sizes[0].depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	case 8:
		format = SVGA3D_LUMINANCE8;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	if (unlikely(format != surface->format)) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitch = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	/* No need to fill the start of VRAM with an empty buffer
	 * if we have screen object support.
	 */
	if (!dev_priv->sou_priv) {
		vfbs->base.pin = &vmw_surface_dmabuf_pin;
		vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
	}
	vfbs->surface = surface;
	vfbs->master = drm_master_get(file_priv->master);
	mutex_init(&vfbs->work_lock);

	mutex_lock(&vmaster->fb_surf_mutex);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	list_add_tail(&vfbs->head, &vmaster->fb_surf);
	mutex_unlock(&vmaster->fb_surf_mutex);

	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
	uint32_t handle;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}

static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       struct vmw_dma_buffer *buffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment)
{
	size_t fifo_size;
	int i;

	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	fifo_size = sizeof(*cmd) * num_clips;
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
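	/* Emit one SVGA_CMD_UPDATE per clip rect. */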
	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, fifo_size);
	return 0;
}

static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       struct vmw_dma_buffer *buffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&framebuffer->base);
	size_t fifo_size;
	int i, ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;
	struct {
		uint32_t header;
		SVGAFifoCmdBlitGMRFBToScreen body;
	} *blits;

	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
	cmd->body.format.colorDepth = framebuffer->base.depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitch;
	cmd->body.ptr.gmrId = vfbd->handle;
	cmd->body.ptr.offset = 0;

	blits = (void *)&cmd[1];
	for (i = 0; i < num_clips; i++, clips += increment) {
		blits[i].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
		blits[i].body.srcOrigin.x = clips->x1;
		blits[i].body.srcOrigin.y = clips->y1;
		blits[i].body.destRect.left = clips->x1;
		blits[i].body.destRect.top = clips->y1;
		blits[i].body.destRect.right = clips->x2;
		blits[i].body.destRect.bottom = clips->y2;
	}

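	/*
	 * Submit through the execbuf path instead of writing the fifo
	 * directly, presumably so the GMRFB reference is validated like
	 * any other user-space command stream.
	 */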
	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
				  fifo_size, 0, NULL);

	kfree(cmd);

	return ret;
}

int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
	struct drm_clip_rect norect;
	int ret, increment = 1;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	if (dev_priv->ldu_priv) {
		ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base, dmabuf,
					  flags, color,
					  clips, num_clips, increment);
	} else {
		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
					  dmabuf, flags, color,
					  clips, num_clips, increment);
	}

	ttm_read_unlock(&vmaster->lock);
	return ret;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

/**
 * Reserve the start of vram, because the host might scribble to it
 * at mode changes.
 */
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);
	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
	int ret;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;

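	/* Constrain the placement to the first size bytes of VRAM
	 * (lpfn is in pages). */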
	ne_placement.lpfn = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;

	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
	if (unlikely(vfbs->buffer == NULL))
		return -ENOMEM;

	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
			      &ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);
	if (unlikely(ret != 0))
		vfbs->buffer = NULL;

	return ret;
}

/*
 * See vmw_surface_dmabuf_pin.
 */
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct ttm_buffer_object *bo;
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);

	if (unlikely(vfbs->buffer == NULL))
		return 0;

	bo = &vfbs->buffer->base;
	ttm_bo_unref(&bo);
	vfbs->buffer = NULL;

	return 0;
}

/*
 * Pin the dmabuffer to the start of vram.
 */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	/* This code should not be used with screen objects */
	BUG_ON(dev_priv->sou_priv);

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);

	vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
}

static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		goto out_err3;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitch = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	if (!dev_priv->sou_priv) {
		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	}
	vfbd->buffer = dmabuf;
	vfbd->handle = mode_cmd->handle;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	u64 required_size;
	int ret;

	/*
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	required_size = mode_cmd->pitch * mode_cmd->height;
	if (unlikely(required_size > (u64) dev_priv->vram_size)) {
		DRM_ERROR("VRAM size is too small for requested mode.\n");
		return NULL;
	}

	/*
	 * End conditioned code.
	 */

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
					     mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	if (!surface->scanout)
		goto err_not_scanout;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
					      &vfb, mode_cmd);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return ERR_PTR(-ENOENT);
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd);

	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;

err_not_scanout:
	DRM_ERROR("surface not marked as scanout\n");
	/* vmw_user_surface_lookup takes one ref */
	vmw_surface_unreference(&surface);

	return ERR_PTR(-EINVAL);
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	ret = vmw_kms_init_screen_object_display(dev_priv);
	if (ret) /* Fallback */
		(void)vmw_kms_init_legacy_display_system(dev_priv);

	return 0;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs say we should take the lock before calling this function,
	 * but since our destructor calls drm_encoder_cleanup, which takes
	 * the lock, doing so would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						   SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
}

u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return 0;
}

/*
 * Small shared kms functions.
 */

int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
			 struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	int i;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	DRM_INFO("%s: new layout ", __func__);
	for (i = 0; i < (int)num; i++)
		DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
			 rects[i].w, rects[i].h);
	DRM_INFO("\n");
#else
	(void)i;
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}

void vmw_du_crtc_restore(struct drm_crtc *crtc)
{
}

void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			   u16 *r, u16 *g, u16 *b,
			   uint32_t start, uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

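	/* The device palette is 8 bits per component; drop the low byte
	 * of the 16-bit DRM gamma values. */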
	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}
}

void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
}

void vmw_du_connector_save(struct drm_connector *connector)
{
}

void vmw_du_connector_restore(struct drm_connector *connector)
{
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
	mutex_unlock(&dev_priv->hw_mutex);

	return ((vmw_connector_to_du(connector)->unit < num_displays) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;

	/* Add preferred mode */
	{
		mode = drm_mode_duplicate(dev, &prefmode);
		if (!mode)
			return 0;
		mode->hdisplay = du->pref_width;
		mode->vdisplay = du->pref_height;
		mode->vrefresh = drm_mode_vrefresh(mode);
		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
					       mode->vdisplay)) {
			drm_mode_probed_add(connector, mode);

			if (du->pref_mode) {
				list_del_init(&du->pref_mode->head);
				drm_mode_destroy(dev, du->pref_mode);
			}

			du->pref_mode = mode;
		}
	}

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector);

	return 1;
}

int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	return 0;
}