/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>


/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

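/**
 * vmw_du_cleanup - Tear down the KMS objects owned by a display unit
 *
 * @du: Pointer to the display unit.
 *
 * Cleans up the planes, connector, CRTC and encoder embedded in @du.
 * The connector is unregistered before it is cleaned up.
 */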
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

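/**
 * vmw_cursor_update_image - Define a new cursor image via the FIFO
 *
 * @dev_priv: Pointer to the device private struct.
 * @image: Pointer to ARGB cursor image data, 4 bytes per pixel.
 * @width: Cursor width in pixels.
 * @height: Cursor height in pixels.
 * @hotspotX: Horizontal hotspot offset.
 * @hotspotY: Vertical hotspot offset.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_ALPHA_CURSOR command followed
 * by the image payload, and commits it with an immediate flush.
 */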
static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}

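/**
 * vmw_cursor_update_dmabuf - Define a cursor image from a dma buffer
 *
 * @dev_priv: Pointer to the device private struct.
 * @dmabuf: Buffer object holding the ARGB cursor image.
 * @width: Cursor width in pixels.
 * @height: Cursor height in pixels.
 * @hotspotX: Horizontal hotspot offset.
 * @hotspotY: Vertical hotspot offset.
 *
 * Reserves and kmaps the buffer, then hands the mapped image to
 * vmw_cursor_update_image().
 */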
static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *dmabuf,
				    u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width * height * 4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}

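/**
 * vmw_cursor_update_position - Show, hide or move the hardware cursor
 *
 * @dev_priv: Pointer to the device private struct.
 * @show: Whether the cursor should be visible.
 * @x: New cursor x position.
 * @y: New cursor y position.
 *
 * Writes the cursor state to the FIFO cursor registers under the cursor
 * lock and bumps SVGA_FIFO_CURSOR_COUNT so the host picks up the change.
 */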
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}

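/**
 * vmw_kms_cursor_snoop - Snoop a cursor image from a surface DMA command
 *
 * @srf: The surface the DMA command targets.
 * @tfile: The caller's ttm object file.
 * @bo: The guest backing buffer of the DMA.
 * @header: Header of the SVGA3D surface DMA command.
 *
 * Copies the 64x64 cursor image out of the guest buffer into the surface
 * snooper so it can be re-emitted later. Only simple, page-aligned,
 * single-box copies are handled; anything else is rejected with an error.
 */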
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is a u32 pointer, so i * 64 steps one 64-pixel row. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

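/**
 * vmw_kms_cursor_post_execbuf - Re-emit snooped cursor images
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Walks all CRTCs and, for each display unit whose snooped cursor image
 * has aged, re-defines the hardware cursor from the snooper contents.
 */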
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/**
 * vmw_du_cursor_plane_update() - Update cursor image and location
 *
 * @plane: plane object to update
 * @crtc: owning CRTC of @plane
 * @fb: framebuffer to flip onto plane
 * @crtc_x: x offset of plane on crtc
 * @crtc_y: y offset of plane on crtc
 * @crtc_w: width of plane rectangle on crtc
 * @crtc_h: height of plane rectangle on crtc
 * @src_x: Not used
 * @src_y: Not used
 * @src_w: Not used
 * @src_h: Not used
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int vmw_du_cursor_plane_update(struct drm_plane *plane,
			       struct drm_crtc *crtc,
			       struct drm_framebuffer *fb,
			       int crtc_x, int crtc_y,
			       unsigned int crtc_w,
			       unsigned int crtc_h,
			       uint32_t src_x, uint32_t src_y,
			       uint32_t src_w, uint32_t src_h)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	s32 hotspot_x, hotspot_y;
	int ret;

	hotspot_x = du->hotspot_x + fb->hot_x;
	hotspot_y = du->hotspot_y + fb->hot_y;

	/* A lot of the code assumes this */
	if (crtc_w != 64 || crtc_h != 64) {
		ret = -EINVAL;
		goto out;
	}

	if (vmw_framebuffer_to_vfb(fb)->dmabuf)
		dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
	else
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
		goto out;
	}

	/* setup new image */
	ret = 0;
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		goto out;
	}

	if (!ret) {
		du->cursor_x = crtc_x + du->set_gui_x;
		du->cursor_y = crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);
	}

out:
	return ret;
}

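/**
 * vmw_du_cursor_plane_disable - Release the cursor plane's framebuffer
 *
 * @plane: Cursor plane to disable.
 *
 * Drops the fb reference held by @plane. Always returns -EINVAL.
 */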
int vmw_du_cursor_plane_disable(struct drm_plane *plane)
{
	if (plane->fb) {
		drm_framebuffer_unreference(plane->fb);
		plane->fb = NULL;
	}

	return -EINVAL;
}

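/**
 * vmw_du_cursor_plane_destroy - Destroy the cursor plane
 *
 * @plane: Cursor plane to destroy.
 *
 * Hides the hardware cursor before cleaning up the plane.
 */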
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}

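/**
 * vmw_du_primary_plane_destroy - Destroy the primary plane
 *
 * @plane: Primary plane to destroy.
 */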
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them */
}

/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}

/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	crtc->state = &vcs->base;
	crtc->state->crtc = crtc;
}

/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->dmabuf)
		(void) vmw_dmabuf_reference(vps->dmabuf);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}

/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	plane->state = &vps->base;
	plane->state->plane = plane;
	plane->state->rotation = DRM_ROTATE_0;
}

/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->dmabuf)
		vmw_dmabuf_unreference(&vps->dmabuf);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

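/**
 * vmw_framebuffer_surface_destroy - Destroy a surface-backed framebuffer
 *
 * @framebuffer: Framebuffer to destroy.
 *
 * Drops the references on the backing surface and, if present, the
 * user-space base object, then frees the framebuffer wrapper.
 */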
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

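/**
 * vmw_framebuffer_surface_dirty - Dirty callback for surface framebuffers
 *
 * @framebuffer: Framebuffer with damaged regions.
 * @file_priv: Identifies the caller.
 * @flags: DRM dirty fb flags.
 * @color: Ignored by this implementation.
 * @clips: Array of damage clip rects.
 * @num_clips: Number of entries in @clips.
 *
 * Dispatches the damaged regions to the screen-object or screen-target
 * code. 3D is not supported on the legacy display unit, so -EINVAL is
 * returned there.
 */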
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
					 struct drm_file *file_priv,
					 unsigned flags, unsigned color,
					 struct drm_clip_rect *clips,
					 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Legacy Display Unit does not support 3D */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
						   clips, NULL, NULL, 0, 0,
						   num_clips, inc, NULL);
	else
		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
						 clips, NULL, NULL, 0, 0,
						 num_clips, inc, NULL);

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return 0;
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_dmabuf_proxy)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

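/**
 * vmw_framebuffer_dmabuf_destroy - Destroy a dma-buffer backed framebuffer
 *
 * @framebuffer: Framebuffer to destroy.
 *
 * Drops the references on the backing buffer and, if present, the
 * user-space base object, then frees the framebuffer wrapper.
 */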
static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

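/**
 * vmw_framebuffer_dmabuf_dirty - Dirty callback for dma-buffer framebuffers
 *
 * @framebuffer: Framebuffer with damaged regions.
 * @file_priv: Identifies the caller.
 * @flags: DRM dirty fb flags.
 * @color: Ignored by this implementation.
 * @clips: Array of damage clip rects.
 * @num_clips: Number of entries in @clips.
 *
 * Routes the damage to the implementation of the active display unit.
 */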
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
				       clips, NULL, num_clips, increment,
				       true, true);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
						  clips, NULL, num_clips,
						  increment, true, NULL);
		break;
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
						  clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};

/**
 * vmw_framebuffer_pin - Pin the buffer backing a framebuffer for scanout
 *
 * @vfb: Framebuffer whose backing buffer should be pinned.
 *
 * For the legacy display unit the dma buffer is pinned at the start of
 * VRAM; for screen objects and screen targets it is pinned in VRAM/GMR
 * or MOB memory, depending on the framebuffer type.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;
	int ret;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->dmabuf)
			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
							     false);

		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
						   &vmw_mob_placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

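/**
 * vmw_framebuffer_unpin - Unpin the buffer backing a framebuffer
 *
 * @vfb: Framebuffer whose backing buffer should be unpinned.
 */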
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @dmabuf_mob: MOB backing the DMA buf
 * @srf_out: newly created surface
 *
 * When the content FB is a DMA buf, we create a surface as a proxy to the
 * same buffer. This way we can do a surface copy rather than a surface DMA,
 * which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
				   const struct drm_mode_fb_cmd2 *mode_cmd,
				   struct vmw_dma_buffer *dmabuf_mob,
				   struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size = {0};
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	content_base_size.width = mode_cmd->pitches[0] / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth = 1;

	ret = vmw_surface_gb_priv_define(dev,
					 0, /* kernel visible only */
					 0, /* flags */
					 format,
					 true, /* can be a scanout buffer */
					 1, /* num of mip levels */
					 0,
					 0,
					 content_base_size,
					 srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_dmabuf_unreference(&res->backup);
	res->backup = vmw_dmabuf_reference(dmabuf_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}

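/**
 * vmw_kms_new_framebuffer_dmabuf - Build a framebuffer around a dma buffer
 *
 * @dev_priv: Pointer to the device private struct.
 * @dmabuf: Buffer backing the framebuffer.
 * @out: On success, points to the new vmw_framebuffer.
 * @mode_cmd: Framebuffer metadata from user space.
 *
 * Validates the buffer size and pixel format against the active display
 * unit before wrapping the buffer in a vmw framebuffer.
 */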
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd2
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.dmabuf = true;
	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_dmabuf_unreference(&dmabuf);
	kfree(vfbd);
out_err1:
	return ret;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @only_2d: No presents will occur to this dma buffer based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_dma_buffer *dmabuf,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_dmabuf_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the DMA buf in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (dmabuf && only_2d &&
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
					      dmabuf, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_dmabuf_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_dmabuf_proxy);

		/*
		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_dmabuf_proxy)
			vmw_surface_unreference(&surface);
	} else if (dmabuf) {
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
						     mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

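/**
 * vmw_kms_fb_create - Implementation of &drm_mode_config_funcs.fb_create
 *
 * @dev: DRM device.
 * @file_priv: The calling file.
 * @mode_cmd: Framebuffer metadata from user space.
 *
 * Looks up the surface or dma buffer backing the requested handle, takes
 * a reference on its user object for the framebuffer's lifetime, and
 * builds the framebuffer via vmw_kms_new_framebuffer().
 */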
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/*
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd->pitches[0],
					mode_cmd->height)) {
		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/*
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

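/*
 * vmw_kms_generic_present - Screen-object present path; simply forwards
 * to vmw_kms_sou_do_surface_dirty(). See vmw_kms_present() for details.
 */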
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL);
}

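/**
 * vmw_kms_present - Present a surface to the screen
 *
 * @dev_priv: Pointer to the device private struct.
 * @file_priv: The calling file.
 * @vfb: Framebuffer the surface belongs to.
 * @surface: Surface to present.
 * @sid: Surface id.
 * @destX: Destination x coordinate.
 * @destY: Destination y coordinate.
 * @clips: Array of clip rects.
 * @num_clips: Number of entries in @clips.
 *
 * Dispatches to the screen-target or screen-object present path and
 * flushes the FIFO on success.
 */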
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	if (!dev_priv->hotplug_mode_update_property)
		return;
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret;

	/*
	 * The docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_close_display(dev_priv);
	else if (dev_priv->active_display_unit == vmw_du_screen_target)
		ret = vmw_kms_stdu_close_display(dev_priv);
	else
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

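/**
 * vmw_kms_cursor_bypass_ioctl - Set legacy cursor hotspots from user space
 *
 * @dev: DRM device.
 * @data: Pointer to a struct drm_vmw_cursor_bypass_arg.
 * @file_priv: The calling file.
 *
 * Updates the hotspot of a single CRTC, or of all CRTCs when
 * DRM_VMW_CURSOR_BYPASS_ALL is set.
 */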
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

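/**
 * vmw_kms_write_svga - Program a display mode into the SVGA registers
 *
 * @vmw_priv: Pointer to the device private struct.
 * @width: Mode width in pixels.
 * @height: Mode height in pixels.
 * @pitch: Scanline pitch in bytes.
 * @bpp: Bits per pixel.
 * @depth: Color depth.
 *
 * Returns -EINVAL if the host reports a depth other than the requested one.
 */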
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

1422int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
1423{
1424 struct vmw_vga_topology_state *save;
1425 uint32_t i;
1426
1427 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
1428 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
Jakob Bornecrantzd7e19582010-05-28 11:21:59 +02001429 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
1430 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1431 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
1432 vmw_priv->vga_pitchlock);
1433 else if (vmw_fifo_have_pitchlock(vmw_priv))
Thomas Hellstromb76ff5e2015-10-28 10:44:04 +01001434 vmw_mmio_write(vmw_priv->vga_pitchlock,
1435 vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001436
Thomas Hellstrom7c4f7782010-06-01 11:38:17 +02001437 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1438 return 0;
1439
1440 for (i = 0; i < vmw_priv->num_displays; ++i) {
1441 save = &vmw_priv->vga_save[i];
1442 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1443 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
1444 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
1445 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
1446 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
1447 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
1448 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1449 }
1450
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001451 return 0;
1452}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}
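
/*
 * Illustrative sketch (not part of the driver build): how a caller might
 * use vmw_kms_validate_mode_vram() to reject a mode up front. The 4
 * bytes-per-pixel pitch is an assumption mirroring the assumed_bpp logic
 * in vmw_du_connector_fill_modes() below; vmw_example_mode_fits() is a
 * hypothetical helper name.
 */
#if 0
static bool vmw_example_mode_fits(struct vmw_private *dev_priv,
				  const struct drm_display_mode *mode)
{
	uint32_t pitch = mode->hdisplay * 4;	/* assume 32bpp */

	return vmw_kms_validate_mode_vram(dev_priv, pitch, mode->vdisplay);
}
#endif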


/**
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -ENOSYS;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}


/*
 * Small shared kms functions.
 */

static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
				struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	{
		unsigned int i;

		DRM_INFO("%s: new layout ", __func__);
		for (i = 0; i < num; i++)
			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
				 rects[i].w, rects[i].h);
		DRM_INFO("\n");
	}
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
			drm_object_property_set_value
				(&con->base, dev->mode_config.suggested_x_property,
				 du->gui_x);
			drm_object_property_set_value
				(&con->base, dev->mode_config.suggested_y_property,
				 du->gui_y);
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			drm_object_property_set_value
				(&con->base, dev->mode_config.suggested_x_property,
				 0);
			drm_object_property_set_value
				(&con->base, dev->mode_config.suggested_y_property,
				 0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);
	drm_sysfs_hotplug_event(dev);

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}
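
/*
 * Illustrative sketch (not part of the driver build): building the kind
 * of 256-entry ramp vmw_du_crtc_gamma_set() consumes. Components are
 * 16-bit values; the function above keeps only the high byte when
 * writing the SVGA palette registers. vmw_example_identity_gamma() is a
 * hypothetical helper name.
 */
#if 0
static void vmw_example_identity_gamma(struct drm_crtc *crtc)
{
	u16 r[256], g[256], b[256];
	int i;

	for (i = 0; i < 256; i++)
		r[i] = g[i] = b[i] = i << 8;	/* 0x0000 .. 0xff00 */

	vmw_du_crtc_gamma_set(crtc, r, g, b, 256);
}
#endif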

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
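
/*
 * Worked example (illustrative only): for a 1024x768 request the code
 * above yields htotal = 1174 and vtotal = 918, so
 * clock = 1174 * 918 / 100 * 6 = 64662 kHz, which drm_mode_vrefresh()
 * maps back to roughly 60 Hz, as intended.
 */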


int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(max_width, dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector);
	/* Move the preferred mode first, to help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}

int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct vmw_private *dev_priv = vmw_priv(connector->dev);

	if (property == dev_priv->implicit_placement_property)
		du->is_implicit = val;

	return 0;
}

int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	u64 total_pixels = 0;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_rect bounding_box = {0};

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}

		/*
		 * bounding_box.w and bounding_box.h are used as
		 * lower-right coordinates
		 */
		if (rects[i].x + rects[i].w > bounding_box.w)
			bounding_box.w = rects[i].x + rects[i].w;

		if (rects[i].y + rects[i].h > bounding_box.h)
			bounding_box.h = rects[i].y + rects[i].h;

		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
	}

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		/*
		 * For Screen Targets, the limits for a topology are:
		 * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
		 * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
		 */
		u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
		u64 pixel_mem = total_pixels * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Topology is beyond supported limits.\n");
			ret = -EINVAL;
			goto out_free;
		}

		if (pixel_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Combined output size too large\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = vmw_fifo_reserve(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd) {
				DRM_ERROR("Couldn't reserve fifo space "
					  "for dirty blits.\n");
				return -ENOMEM;
			}
			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
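
/*
 * Illustrative sketch (not part of the driver build): the closure pattern
 * vmw_kms_helper_dirty() expects. Real callers fill in hardware-specific
 * @clip and @fifo_commit hooks; the bodies and the 16-byte per-clip
 * command size below are placeholders, and the vmw_example_* names are
 * hypothetical.
 */
#if 0
static void vmw_example_clip(struct vmw_kms_dirty *dirty)
{
	/* Encode one blit for the clipped rect into dirty->cmd here. */
	dirty->num_hits++;
}

static void vmw_example_fifo_commit(struct vmw_kms_dirty *dirty)
{
	/* Commit only what was actually encoded. */
	vmw_fifo_commit(dirty->dev_priv, dirty->num_hits * 16);
}

static int vmw_example_dirty(struct vmw_private *dev_priv,
			     struct vmw_framebuffer *framebuffer,
			     const struct drm_clip_rect *clips,
			     int num_clips)
{
	struct vmw_kms_dirty dirty = {
		.clip = vmw_example_clip,
		.fifo_commit = vmw_example_fifo_commit,
		.fifo_reserve_size = num_clips * 16,	/* hypothetical size */
	};

	return vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
				    0, 0, num_clips, 1, &dirty);
}
#endif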

/**
 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
 * command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @buf: The buffer object.
 * @interruptible: Whether to perform waits as interruptible.
 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 * the buffer will be validated as a GMR. Already pinned buffers will not be
 * validated.
 *
 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible,
				  bool validate_as_mob)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ttm_bo_reserve(bo, false, false, NULL);
	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
					 validate_as_mob);
	if (ret)
		ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_kms_helper_buffer_revert - Undo the actions of
 * vmw_kms_helper_buffer_prepare.
 *
 * @buf: Pointer to the buffer object.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_buffer_prepare.
 */
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
{
	if (buf)
		ttm_bo_unreserve(&buf->base);
}

/**
 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
 * kms command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: Pointer to a struct drm_file representing the caller's
 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
 * if non-NULL, @user_fence_rep must be non-NULL.
 * @buf: The buffer object.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 * @user_fence_rep: Optional pointer to a user-space provided struct
 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
 * function copies fence data to user-space in a fail-safe manner.
 */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
				  struct vmw_dma_buffer *buf,
				  struct vmw_fence_obj **out_fence,
				  struct drm_vmw_fence_rep __user *
				  user_fence_rep)
{
	struct vmw_fence_obj *fence;
	uint32_t handle;
	int ret;

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 file_priv ? &handle : NULL);
	if (buf)
		vmw_fence_single_bo(&buf->base, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);

	vmw_kms_helper_buffer_revert(buf);
}
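
/*
 * Illustrative sketch (not part of the driver build): the intended pairing
 * of vmw_kms_helper_buffer_prepare() and vmw_kms_helper_buffer_finish()
 * around command submission. The submission step in the middle is a
 * placeholder, and vmw_example_buffer_blit() is a hypothetical name.
 */
#if 0
static int vmw_example_buffer_blit(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *buf)
{
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	/* ... reserve fifo space and emit commands referencing @buf ... */

	/* Fences @buf and unreserves it; no fence or user copy needed. */
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);

	return 0;
}
#endif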


/**
 * vmw_kms_helper_resource_revert - Undo the actions of
 * vmw_kms_helper_resource_prepare.
 *
 * @res: Pointer to the resource. Typically a surface.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_resource_prepare.
 */
void vmw_kms_helper_resource_revert(struct vmw_resource *res)
{
	vmw_kms_helper_buffer_revert(res->backup);
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}

/**
 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
 * command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @interruptible: Whether to perform waits as interruptible.
 *
 * Also reserves and validates the backup buffer if the resource is
 * guest-backed. Returns 0 on success, negative error code on failure,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
				    bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
	else
		mutex_lock(&res->dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_unlock;

	if (res->backup) {
		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
						    interruptible,
						    res->dev_priv->has_mob);
		if (ret)
			goto out_unreserve;
	}
	ret = vmw_resource_validate(res);
	if (ret)
		goto out_revert;
	return 0;

out_revert:
	vmw_kms_helper_buffer_revert(res->backup);
out_unreserve:
	vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
	return ret;
}

/**
 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
 * kms command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 */
void vmw_kms_helper_resource_finish(struct vmw_resource *res,
				    struct vmw_fence_obj **out_fence)
{
	if (res->backup || out_fence)
		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
					     out_fence, NULL);

	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
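
/*
 * Illustrative sketch (not part of the driver build): the matching
 * prepare/finish bracket for a resource (typically a surface), with
 * vmw_kms_helper_resource_revert() on the error path.
 * vmw_example_surface_update() and vmw_example_emit_commands() are
 * hypothetical names.
 */
#if 0
static int vmw_example_surface_update(struct vmw_resource *res)
{
	struct vmw_fence_obj *fence = NULL;
	int ret;

	ret = vmw_kms_helper_resource_prepare(res, true);
	if (ret)
		return ret;

	ret = vmw_example_emit_commands(res);	/* hypothetical helper */
	if (ret) {
		vmw_kms_helper_resource_revert(res);
		return ret;
	}

	vmw_kms_helper_resource_finish(res, &fence);
	if (fence)
		vmw_fence_obj_unreference(&fence);

	return 0;
}
#endif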

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource.
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd) {
		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
			  "update.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}

int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;

	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (i != unit) {
		DRM_ERROR("Could not find initial display unit.\n");
		return -EINVAL;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		return -EINVAL;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		*p_mode = mode;
	else {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	}

	return 0;
}

/**
 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 */
void vmw_kms_del_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	if (du->active_implicit) {
		if (--(dev_priv->num_implicit) == 0)
			dev_priv->implicit_fb = NULL;
		du->active_implicit = false;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 * @vfb: The implicit framebuffer
 *
 * Registers a binding to an implicit framebuffer.
 */
void vmw_kms_add_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du,
			struct vmw_framebuffer *vfb)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);

	if (!du->active_implicit && du->is_implicit) {
		dev_priv->implicit_fb = vfb;
		du->active_implicit = true;
		dev_priv->num_implicit++;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc we want to flip.
 *
 * Returns true or false depending on whether it's OK to flip this crtc
 * based on the criterion that we must not have more than one implicit
 * frame-buffer at any one time.
 */
bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
			    struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool ret;

	mutex_lock(&dev_priv->global_kms_state_mutex);
	ret = !du->is_implicit || dev_priv->num_implicit == 1;
	mutex_unlock(&dev_priv->global_kms_state_mutex);

	return ret;
}

/**
 * vmw_kms_update_implicit_fb - Update the implicit fb.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc the new implicit frame-buffer is bound to.
 */
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
				struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_framebuffer *vfb;

	mutex_lock(&dev_priv->global_kms_state_mutex);

	if (!du->is_implicit)
		goto out_unlock;

	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
		     dev_priv->implicit_fb != vfb);

	dev_priv->implicit_fb = vfb;
out_unlock:
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 * @immutable: Whether the property is immutable.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
					   bool immutable)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(dev_priv->dev,
					  immutable ?
					  DRM_MODE_PROP_IMMUTABLE : 0,
					  "implicit_placement", 0, 1);
}