/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>


/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

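/**
 * vmw_du_cleanup - Tear down the DRM objects embedded in a display unit
 *
 * @du: Display unit whose primary and cursor planes, connector, crtc and
 * encoder are cleaned up.
 */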
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

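/**
 * vmw_cursor_update_image - Send a define-alpha-cursor command to the device
 *
 * @dev_priv: Pointer to the device private struct.
 * @image: Pointer to a 32bpp cursor image of @width x @height pixels.
 * @width: Width of the cursor image.
 * @height: Height of the cursor image.
 * @hotspotX: Cursor hotspot x coordinate.
 * @hotspotY: Cursor hotspot y coordinate.
 *
 * Reserves FIFO space for the command header and the image data, copies the
 * image right after the command and commits with a flush.
 */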
static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}

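/**
 * vmw_cursor_update_dmabuf - Update the cursor image from a dma buffer
 *
 * @dev_priv: Pointer to the device private struct.
 * @dmabuf: Buffer object holding the 32bpp cursor image.
 * @width: Width of the cursor image.
 * @height: Height of the cursor image.
 * @hotspotX: Cursor hotspot x coordinate.
 * @hotspotY: Cursor hotspot y coordinate.
 *
 * Reserves and kmaps the buffer object, then forwards the mapped image to
 * vmw_cursor_update_image().
 */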
static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *dmabuf,
				    u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}


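/**
 * vmw_cursor_update_position - Show, hide or move the hardware cursor
 *
 * @dev_priv: Pointer to the device private struct.
 * @show: Whether the cursor should be visible.
 * @x: New cursor x position.
 * @y: New cursor y position.
 *
 * Writes the cursor visibility and position to the cursor fields in FIFO
 * memory under cursor_lock and bumps SVGA_FIFO_CURSOR_COUNT so the device
 * picks up the change.
 */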
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}


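/**
 * vmw_kms_cursor_snoop - Snoop a cursor image from a surface DMA command
 *
 * @srf: The surface the DMA command targets.
 * @tfile: Not used.
 * @bo: The guest buffer object the DMA command reads from.
 * @header: Header of the surface DMA command.
 *
 * Copies the 64x64 32bpp cursor image referenced by the command from the
 * guest buffer into the surface's snooper image and bumps the snooper age,
 * so the image can later be replayed as the hardware cursor.
 */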
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

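/**
 * vmw_kms_cursor_post_execbuf - Refresh stale cursor images after execbuf
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Walks all crtcs and re-sends the snooped cursor image of every display
 * unit whose cursor surface carries a newer snooper age than the one last
 * programmed.
 */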
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


/**
 * vmw_du_cursor_plane_update() - Update cursor image and location
 *
 * @plane: plane object to update
 * @crtc: owning CRTC of @plane
 * @fb: framebuffer to flip onto plane
 * @crtc_x: x offset of plane on crtc
 * @crtc_y: y offset of plane on crtc
 * @crtc_w: width of plane rectangle on crtc
 * @crtc_h: height of plane rectangle on crtc
 * @src_x: Not used
 * @src_y: Not used
 * @src_w: Not used
 * @src_h: Not used
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int vmw_du_cursor_plane_update(struct drm_plane *plane,
			       struct drm_crtc *crtc,
			       struct drm_framebuffer *fb,
			       int crtc_x, int crtc_y,
			       unsigned int crtc_w,
			       unsigned int crtc_h,
			       uint32_t src_x, uint32_t src_y,
			       uint32_t src_w, uint32_t src_h)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	s32 hotspot_x, hotspot_y;
	int ret;

	hotspot_x = du->hotspot_x + fb->hot_x;
	hotspot_y = du->hotspot_y + fb->hot_y;

	/* A lot of the code assumes this */
	if (crtc_w != 64 || crtc_h != 64) {
		ret = -EINVAL;
		goto out;
	}

	if (vmw_framebuffer_to_vfb(fb)->dmabuf)
		dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
	else
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
		goto out;
	}

	/* setup new image */
	ret = 0;
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		goto out;
	}

	if (!ret) {
		du->cursor_x = crtc_x + du->set_gui_x;
		du->cursor_y = crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);
	}

out:
	return ret;
}


int vmw_du_cursor_plane_disable(struct drm_plane *plane)
{
	if (plane->fb) {
		drm_framebuffer_unreference(plane->fb);
		plane->fb = NULL;
	}

	return -EINVAL;
}


void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them */
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	crtc->state = &vcs->base;
	crtc->state->crtc = crtc;
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->dmabuf)
		(void) vmw_dmabuf_reference(vps->dmabuf);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	plane->state = &vps->base;
	plane->state->plane = plane;
	plane->state->rotation = DRM_ROTATE_0;
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->dmabuf)
		vmw_dmabuf_unreference(&vps->dmabuf);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

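/**
 * vmw_framebuffer_surface_dirty - Dirty callback for surface-backed framebuffers
 *
 * @framebuffer: Framebuffer touched by user-space.
 * @file_priv: Identifies the caller.
 * @flags: Dirty ioctl flags (DRM_MODE_FB_DIRTY_*).
 * @color: Not used.
 * @clips: Array of dirty clip rects.
 * @num_clips: Number of clip rects in @clips.
 *
 * Forwards the dirty regions to the screen-object or screen-target code.
 * Not supported on the legacy display unit.
 */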
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
					 struct drm_file *file_priv,
					 unsigned flags, unsigned color,
					 struct drm_clip_rect *clips,
					 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Legacy Display Unit does not support 3D */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
						   clips, NULL, NULL, 0, 0,
						   num_clips, inc, NULL);
	else
		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
						 clips, NULL, NULL, 0, 0,
						 num_clips, inc, NULL);

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return 0;
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};

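/**
 * vmw_kms_new_framebuffer_surface - Build a framebuffer around a surface
 *
 * @dev_priv: Pointer to the device private struct.
 * @surface: Surface to wrap.
 * @out: On success, points to the new vmw_framebuffer.
 * @mode_cmd: Framebuffer metadata from user-space.
 * @is_dmabuf_proxy: Whether the surface is a proxy created for a dma buffer.
 *
 * Sanity-checks the surface dimensions and format against @mode_cmd and
 * registers a new DRM framebuffer scanning out of the surface. Not
 * available on the legacy display unit, which lacks 3D support.
 */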
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_dmabuf_proxy)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

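/**
 * vmw_framebuffer_dmabuf_dirty - Dirty callback for dma-buffer backed framebuffers
 *
 * @framebuffer: Framebuffer touched by user-space.
 * @file_priv: Identifies the caller.
 * @flags: Dirty ioctl flags (DRM_MODE_FB_DIRTY_*).
 * @color: Not used.
 * @clips: Array of dirty clip rects.
 * @num_clips: Number of clip rects in @clips.
 *
 * Dispatches the dirty regions to the screen-target, screen-object or
 * legacy display unit implementation.
 */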
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
				       clips, NULL, num_clips, increment,
				       true, true);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
						  clips, NULL, num_clips,
						  increment, true, NULL);
		break;
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
						  clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};

/**
 * Pin the dmabuffer to the start of vram.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;
	int ret;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->dmabuf)
			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
							     false);

		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
						   &vmw_mob_placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

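/**
 * vmw_framebuffer_unpin - Unpin the buffer backing a framebuffer
 *
 * @vfb: Framebuffer whose backing dma buffer is unpinned.
 */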
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @dmabuf_mob: MOB backing the DMA buf
 * @srf_out: newly created surface
 *
 * When the content FB is a DMA buf, we create a surface as a proxy to the
 * same buffer. This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
				   const struct drm_mode_fb_cmd2 *mode_cmd,
				   struct vmw_dma_buffer *dmabuf_mob,
				   struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size = {0};
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	content_base_size.width = mode_cmd->pitches[0] / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth = 1;

	ret = vmw_surface_gb_priv_define(dev,
					 0, /* kernel visible only */
					 0, /* flags */
					 format,
					 true, /* can be a scanout buffer */
					 1, /* num of mip levels */
					 0,
					 0,
					 content_base_size,
					 srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_dmabuf_unreference(&res->backup);
	res->backup = vmw_dmabuf_reference(dmabuf_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}

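/**
 * vmw_kms_new_framebuffer_dmabuf - Build a framebuffer around a dma buffer
 *
 * @dev_priv: Pointer to the device private struct.
 * @dmabuf: Dma buffer to wrap.
 * @out: On success, points to the new vmw_framebuffer.
 * @mode_cmd: Framebuffer metadata from user-space.
 *
 * Checks that the buffer is large enough for the requested mode and that
 * its pixel format is supported by the active display unit, then registers
 * the new DRM framebuffer.
 */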
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd2
					  *mode_cmd)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.dmabuf = true;
	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_dmabuf_unreference(&dmabuf);
	kfree(vfbd);
out_err1:
	return ret;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @only_2d: No presents will occur to this dma buffer based framebuffer. This
 * helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_dma_buffer *dmabuf,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_dmabuf_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the DMA buf in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (dmabuf && only_2d &&
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
					      dmabuf, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_dmabuf_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_dmabuf_proxy);

		/*
		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_dmabuf_proxy)
			vmw_surface_unreference(&surface);
	} else if (dmabuf) {
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
						     mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

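/**
 * vmw_kms_fb_create - The .fb_create callback of the vmw mode_config funcs
 *
 * @dev: DRM device.
 * @file_priv: Identifies the calling user-space client.
 * @mode_cmd: Framebuffer metadata from the ADDFB2 ioctl.
 *
 * Looks up the surface or dma buffer backing the handle in @mode_cmd,
 * takes a reference on its base user object and wraps it in a new
 * vmw_framebuffer via vmw_kms_new_framebuffer().
 */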
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd->pitches[0],
					mode_cmd->height)) {
		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL);
}


int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	if (!dev_priv->hotplug_mode_update_property)
		return;

}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_close_display(dev_priv);
	else if (dev_priv->active_display_unit == vmw_du_screen_target)
		ret = vmw_kms_stdu_close_display(dev_priv);
	else
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;


	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

Thomas Hellstrome133e732010-10-05 12:43:04 +02001529bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1530 uint32_t pitch,
1531 uint32_t height)
1532{
Sinclair Yeh35c05122015-06-26 01:42:06 -07001533 return ((u64) pitch * (u64) height) < (u64)
1534 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
1535 dev_priv->prim_bb_mem : dev_priv->vram_size);
Thomas Hellstrome133e732010-10-05 12:43:04 +02001536}
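
/*
 * Worked example (illustrative numbers): when probing a 1920x1200 mode with
 * an assumed 4 bytes per pixel, the caller passes pitch = 1920 * 4 = 7680,
 * so the check becomes 7680 * 1200 = 9216000 bytes, which must stay below
 * vram_size (or prim_bb_mem when screen targets are active).
 */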
1537
Jakob Bornecrantz1c482ab2011-10-17 11:59:45 +02001538
1539/**
1540 * Function called by DRM code with vbl_lock held.
1541 */
Thierry Reding88e72712015-09-24 18:35:31 +02001542u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
Thomas Hellstrom7a1c2f62010-10-01 10:21:49 +02001543{
1544 return 0;
1545}
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001546
Jakob Bornecrantz1c482ab2011-10-17 11:59:45 +02001547/**
1548 * Function called by DRM code with vbl_lock held.
1549 */
Thierry Reding88e72712015-09-24 18:35:31 +02001550int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jakob Bornecrantz1c482ab2011-10-17 11:59:45 +02001551{
1552 return -ENOSYS;
1553}
1554
1555/**
1556 * Function called by DRM code with vbl_lock held.
1557 */
Thierry Reding88e72712015-09-24 18:35:31 +02001558void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jakob Bornecrantz1c482ab2011-10-17 11:59:45 +02001559{
1560}
1561
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001562
1563/*
1564 * Small shared kms functions.
1565 */
1566
Rashika Kheria847c5962014-01-06 22:18:10 +05301567static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001568 struct drm_vmw_rect *rects)
1569{
1570 struct drm_device *dev = dev_priv->dev;
1571 struct vmw_display_unit *du;
1572 struct drm_connector *con;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001573
1574 mutex_lock(&dev->mode_config.mutex);
1575
1576#if 0
Thomas Hellstrom6ea77d12011-10-04 20:13:36 +02001577 {
1578 unsigned int i;
1579
1580 DRM_INFO("%s: new layout ", __func__);
1581 for (i = 0; i < num; i++)
1582 DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
1583 rects[i].w, rects[i].h);
1584 DRM_INFO("\n");
1585 }
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001586#endif
1587
1588 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
1589 du = vmw_connector_to_du(con);
1590 if (num > du->unit) {
1591 du->pref_width = rects[du->unit].w;
1592 du->pref_height = rects[du->unit].h;
1593 du->pref_active = true;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001594 du->gui_x = rects[du->unit].x;
1595 du->gui_y = rects[du->unit].y;
Thomas Hellstrom578e6092016-02-12 09:45:42 +01001596 drm_object_property_set_value
1597 (&con->base, dev->mode_config.suggested_x_property,
1598 du->gui_x);
1599 drm_object_property_set_value
1600 (&con->base, dev->mode_config.suggested_y_property,
1601 du->gui_y);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001602 } else {
1603 du->pref_width = 800;
1604 du->pref_height = 600;
1605 du->pref_active = false;
Thomas Hellstrom578e6092016-02-12 09:45:42 +01001606 drm_object_property_set_value
1607 (&con->base, dev->mode_config.suggested_x_property,
1608 0);
1609 drm_object_property_set_value
1610 (&con->base, dev->mode_config.suggested_y_property,
1611 0);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001612 }
1613 con->status = vmw_du_connector_detect(con, true);
1614 }
1615
1616 mutex_unlock(&dev->mode_config.mutex);
Thomas Hellstrom578e6092016-02-12 09:45:42 +01001617 drm_sysfs_hotplug_event(dev);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001618
1619 return 0;
1620}
1621
Maarten Lankhorst7ea77282016-06-07 12:49:30 +02001622int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1623 u16 *r, u16 *g, u16 *b,
1624 uint32_t size)
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001625{
1626 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1627 int i;
1628
1629 for (i = 0; i < size; i++) {
1630 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
1631 r[i], g[i], b[i]);
1632 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
1633 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
1634 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
1635 }
Maarten Lankhorst7ea77282016-06-07 12:49:30 +02001636
1637 return 0;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001638}
1639
Maarten Lankhorst9a69a9a2015-07-21 11:34:55 +02001640int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001641{
Maarten Lankhorst9a69a9a2015-07-21 11:34:55 +02001642 return 0;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001643}
1644
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001645enum drm_connector_status
1646vmw_du_connector_detect(struct drm_connector *connector, bool force)
1647{
1648 uint32_t num_displays;
1649 struct drm_device *dev = connector->dev;
1650 struct vmw_private *dev_priv = vmw_priv(dev);
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001651 struct vmw_display_unit *du = vmw_connector_to_du(connector);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001652
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001653 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001654
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001655 return ((vmw_connector_to_du(connector)->unit < num_displays &&
1656 du->pref_active) ?
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001657 connector_status_connected : connector_status_disconnected);
1658}
1659
1660static struct drm_display_mode vmw_kms_connector_builtin[] = {
1661 /* 640x480@60Hz */
1662 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
1663 752, 800, 0, 480, 489, 492, 525, 0,
1664 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
1665 /* 800x600@60Hz */
1666 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
1667 968, 1056, 0, 600, 601, 605, 628, 0,
1668 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1669 /* 1024x768@60Hz */
1670 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1671 1184, 1344, 0, 768, 771, 777, 806, 0,
1672 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
1673 /* 1152x864@75Hz */
1674 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1675 1344, 1600, 0, 864, 865, 868, 900, 0,
1676 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1677 /* 1280x768@60Hz */
1678 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1679 1472, 1664, 0, 768, 771, 778, 798, 0,
1680 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1681 /* 1280x800@60Hz */
1682 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1683 1480, 1680, 0, 800, 803, 809, 831, 0,
1684 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
1685 /* 1280x960@60Hz */
1686 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1687 1488, 1800, 0, 960, 961, 964, 1000, 0,
1688 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1689 /* 1280x1024@60Hz */
1690 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1691 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
1692 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1693 /* 1360x768@60Hz */
1694 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1695 1536, 1792, 0, 768, 771, 777, 795, 0,
1696 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1697 /* 1400x1050@60Hz */
1698 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1699 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
1700 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1701 /* 1440x900@60Hz */
1702 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1703 1672, 1904, 0, 900, 903, 909, 934, 0,
1704 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1705 /* 1600x1200@60Hz */
1706 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1707 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
1708 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1709 /* 1680x1050@60Hz */
1710 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1711 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
1712 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1713 /* 1792x1344@60Hz */
1714 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
1715 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
1716 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1717 /* 1856x1392@60Hz */
1718 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
1719 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
1720 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1721 /* 1920x1200@60Hz */
1722 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
1723 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
1724 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1725 /* 1920x1440@60Hz */
1726 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
1727 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
1728 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1729 /* 2560x1600@60Hz */
1730 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
1731 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
1732 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1733 /* Terminate */
1734 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
1735};
1736
Thomas Hellstrom1543b4d2011-11-02 09:43:10 +01001737/**
1738 * vmw_guess_mode_timing - Provide fake timings for a
1739 * 60Hz vrefresh mode.
1740 *
1741 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
1742 * members filled in.
1743 */
Thomas Hellstroma2787242015-06-29 12:55:07 -07001744void vmw_guess_mode_timing(struct drm_display_mode *mode)
Thomas Hellstrom1543b4d2011-11-02 09:43:10 +01001745{
1746 mode->hsync_start = mode->hdisplay + 50;
1747 mode->hsync_end = mode->hsync_start + 50;
1748 mode->htotal = mode->hsync_end + 50;
1749
1750 mode->vsync_start = mode->vdisplay + 50;
1751 mode->vsync_end = mode->vsync_start + 50;
1752 mode->vtotal = mode->vsync_end + 50;
1753
1754 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
1755 mode->vrefresh = drm_mode_vrefresh(mode);
1756}
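
/*
 * Sanity check of the guessed timings above (illustrative numbers): for a
 * 1024x768 request, htotal = 1024 + 150 = 1174 and vtotal = 768 + 150 = 918,
 * so clock = 1174 * 918 / 100 * 6 = 64662 kHz, and the resulting refresh
 * rate is 64662000 / (1174 * 918) ~= 60 Hz, as intended.
 */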
1757
1758
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001759int vmw_du_connector_fill_modes(struct drm_connector *connector,
1760 uint32_t max_width, uint32_t max_height)
1761{
1762 struct vmw_display_unit *du = vmw_connector_to_du(connector);
1763 struct drm_device *dev = connector->dev;
1764 struct vmw_private *dev_priv = vmw_priv(dev);
1765 struct drm_display_mode *mode = NULL;
1766 struct drm_display_mode *bmode;
1767 struct drm_display_mode prefmode = { DRM_MODE("preferred",
1768 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
1769 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1770 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
1771 };
1772 int i;
Sinclair Yeh7c20d212016-06-29 11:29:47 -07001773 u32 assumed_bpp = 4;
Sinclair Yeh9a723842014-10-31 09:58:06 +01001774
Sinclair Yeh04319d82016-06-29 12:15:48 -07001775 if (dev_priv->assume_16bpp)
1776 assumed_bpp = 2;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001777
Sinclair Yeh35c05122015-06-26 01:42:06 -07001778 if (dev_priv->active_display_unit == vmw_du_screen_target) {
1779 max_width = min(max_width, dev_priv->stdu_max_width);
1780 max_height = min(max_height, dev_priv->stdu_max_height);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001781 }
1782
1783 /* Add preferred mode */
Sinclair Yehc8261a92015-06-26 01:23:42 -07001784 mode = drm_mode_duplicate(dev, &prefmode);
1785 if (!mode)
1786 return 0;
1787 mode->hdisplay = du->pref_width;
1788 mode->vdisplay = du->pref_height;
1789 vmw_guess_mode_timing(mode);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001790
Sinclair Yehc8261a92015-06-26 01:23:42 -07001791 if (vmw_kms_validate_mode_vram(dev_priv,
1792 mode->hdisplay * assumed_bpp,
1793 mode->vdisplay)) {
1794 drm_mode_probed_add(connector, mode);
1795 } else {
1796 drm_mode_destroy(dev, mode);
1797 mode = NULL;
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001798 }
1799
Sinclair Yehc8261a92015-06-26 01:23:42 -07001800 if (du->pref_mode) {
1801 list_del_init(&du->pref_mode->head);
1802 drm_mode_destroy(dev, du->pref_mode);
1803 }
1804
1805	/* mode may be NULL here; this is intended */
1806 du->pref_mode = mode;
1807
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001808 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
1809 bmode = &vmw_kms_connector_builtin[i];
1810 if (bmode->hdisplay > max_width ||
1811 bmode->vdisplay > max_height)
1812 continue;
1813
Sinclair Yeh9a723842014-10-31 09:58:06 +01001814 if (!vmw_kms_validate_mode_vram(dev_priv,
1815 bmode->hdisplay * assumed_bpp,
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001816 bmode->vdisplay))
1817 continue;
1818
1819 mode = drm_mode_duplicate(dev, bmode);
1820 if (!mode)
1821 return 0;
1822 mode->vrefresh = drm_mode_vrefresh(mode);
1823
1824 drm_mode_probed_add(connector, mode);
1825 }
1826
Ville Syrjälä6af3e652015-12-03 23:14:14 +02001827 drm_mode_connector_list_update(connector);
Thomas Hellstromf6b05002015-06-29 12:59:58 -07001828	/* Move the preferred mode first to help apps pick the right mode. */
1829 drm_mode_sort(&connector->modes);
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001830
1831 return 1;
1832}
1833
1834int vmw_du_connector_set_property(struct drm_connector *connector,
1835 struct drm_property *property,
1836 uint64_t val)
1837{
Thomas Hellstrom76404ac2016-02-12 09:55:45 +01001838 struct vmw_display_unit *du = vmw_connector_to_du(connector);
1839 struct vmw_private *dev_priv = vmw_priv(connector->dev);
1840
1841 if (property == dev_priv->implicit_placement_property)
1842 du->is_implicit = val;
1843
Jakob Bornecrantz626ab772011-10-04 20:13:20 +02001844 return 0;
1845}
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001846
1847
Sinclair Yeh9c2542a2017-03-23 11:33:39 -07001848
Sinclair Yehd7721ca2017-03-23 11:48:44 -07001849/**
1850 * vmw_du_connector_atomic_set_property - Atomic version of set property
1851 *
1852 * @connector: connector the property is associated with
1853 *
1854 * Returns:
1855 * Zero on success, negative errno on failure.
1856 */
1857int
1858vmw_du_connector_atomic_set_property(struct drm_connector *connector,
1859 struct drm_connector_state *state,
1860 struct drm_property *property,
1861 uint64_t val)
1862{
1863 struct vmw_private *dev_priv = vmw_priv(connector->dev);
1864 struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
1865 struct vmw_display_unit *du = vmw_connector_to_du(connector);
1866
1867
1868 if (property == dev_priv->implicit_placement_property) {
1869 vcs->is_implicit = val;
1870
1871 /*
1872 * We should really be doing a drm_atomic_commit() to
1873 * commit the new state, but since this doesn't cause
1874	 * an immediate state change, this is probably OK.
1875 */
1876 du->is_implicit = vcs->is_implicit;
1877 } else {
1878 return -EINVAL;
1879 }
1880
1881 return 0;
1882}
1883
1884
1885/**
1886 * vmw_du_connector_atomic_get_property - Atomic version of get property
1887 *
1888 * @connector: connector the property is associated with
1889 *
1890 * Returns:
1891 * Zero on success, negative errno on failure.
1892 */
1893int
1894vmw_du_connector_atomic_get_property(struct drm_connector *connector,
1895 const struct drm_connector_state *state,
1896 struct drm_property *property,
1897 uint64_t *val)
1898{
1899 struct vmw_private *dev_priv = vmw_priv(connector->dev);
1900 struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
1901
1902 if (property == dev_priv->implicit_placement_property)
1903 *val = vcs->is_implicit;
1904 else {
1905 DRM_ERROR("Invalid Property %s\n", property->name);
1906 return -EINVAL;
1907 }
1908
1909 return 0;
1910}
1911
1912
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001913int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1914 struct drm_file *file_priv)
1915{
1916 struct vmw_private *dev_priv = vmw_priv(dev);
1917 struct drm_vmw_update_layout_arg *arg =
1918 (struct drm_vmw_update_layout_arg *)data;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001919 void __user *user_rects;
1920 struct drm_vmw_rect *rects;
1921 unsigned rects_size;
1922 int ret;
1923 int i;
Sinclair Yeh65ade7d2015-07-16 10:49:13 -07001924 u64 total_pixels = 0;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001925 struct drm_mode_config *mode_config = &dev->mode_config;
Sinclair Yehc8261a92015-06-26 01:23:42 -07001926 struct drm_vmw_rect bounding_box = {0};
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001927
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001928 if (!arg->num_outputs) {
1929 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
1930 vmw_du_update_layout(dev_priv, 1, &def_rect);
Thomas Hellstrom5151adb2015-03-09 01:56:21 -07001931 return 0;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001932 }
1933
1934 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
Xi Wangbab9efc2011-11-28 12:25:43 +01001935 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
1936 GFP_KERNEL);
Thomas Hellstrom5151adb2015-03-09 01:56:21 -07001937 if (unlikely(!rects))
1938 return -ENOMEM;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001939
1940 user_rects = (void __user *)(unsigned long)arg->rects;
1941 ret = copy_from_user(rects, user_rects, rects_size);
1942 if (unlikely(ret != 0)) {
1943 DRM_ERROR("Failed to get rects.\n");
1944 ret = -EFAULT;
1945 goto out_free;
1946 }
1947
1948 for (i = 0; i < arg->num_outputs; ++i) {
Xi Wangbab9efc2011-11-28 12:25:43 +01001949 if (rects[i].x < 0 ||
1950 rects[i].y < 0 ||
1951 rects[i].x + rects[i].w > mode_config->max_width ||
1952 rects[i].y + rects[i].h > mode_config->max_height) {
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001953 DRM_ERROR("Invalid GUI layout.\n");
1954 ret = -EINVAL;
1955 goto out_free;
1956 }
Sinclair Yehc8261a92015-06-26 01:23:42 -07001957
1958 /*
1959		 * bounding_box.w and bounding_box.h are used as
1960 * lower-right coordinates
1961 */
1962 if (rects[i].x + rects[i].w > bounding_box.w)
1963 bounding_box.w = rects[i].x + rects[i].w;
1964
1965 if (rects[i].y + rects[i].h > bounding_box.h)
1966 bounding_box.h = rects[i].y + rects[i].h;
Sinclair Yeh65ade7d2015-07-16 10:49:13 -07001967
1968 total_pixels += (u64) rects[i].w * (u64) rects[i].h;
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001969 }
1970
Sinclair Yeh65ade7d2015-07-16 10:49:13 -07001971 if (dev_priv->active_display_unit == vmw_du_screen_target) {
1972 /*
1973		 * For Screen Targets, the limits for a topology are:
1974 * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
1975 * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
1976 */
Thomas Hellstrom0f580382017-01-19 11:01:04 -08001977 u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
Sinclair Yeh65ade7d2015-07-16 10:49:13 -07001978 u64 pixel_mem = total_pixels * 4;
1979
1980 if (bb_mem > dev_priv->prim_bb_mem) {
1981 DRM_ERROR("Topology is beyond supported limits.\n");
Sinclair Yeh35c05122015-06-26 01:42:06 -07001982 ret = -EINVAL;
1983 goto out_free;
1984 }
1985
Sinclair Yeh65ade7d2015-07-16 10:49:13 -07001986 if (pixel_mem > dev_priv->prim_bb_mem) {
1987 DRM_ERROR("Combined output size too large\n");
1988 ret = -EINVAL;
1989 goto out_free;
1990 }
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001991 }
1992
1993 vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
1994
1995out_free:
1996 kfree(rects);
Thomas Hellstromcd2b89e2011-10-25 23:35:53 +02001997 return ret;
1998}
Thomas Hellstrom1a4b1722015-06-26 02:03:53 -07001999
2000/**
2001 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2002 * on a set of cliprects and a set of display units.
2003 *
2004 * @dev_priv: Pointer to a device private structure.
2005 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2006 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2007 * Cliprects are given in framebuffer coordinates.
2008 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2009 * be NULL. Cliprects are given in source coordinates.
2010 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2011 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2012 * @num_clips: Number of cliprects in the @clips or @vclips array.
2013 * @increment: Integer with which to increment the clip counter when looping.
2014 * Used to skip a predetermined number of clip rects.
2015 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2016 */
2017int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2018 struct vmw_framebuffer *framebuffer,
2019 const struct drm_clip_rect *clips,
2020 const struct drm_vmw_rect *vclips,
2021 s32 dest_x, s32 dest_y,
2022 int num_clips,
2023 int increment,
2024 struct vmw_kms_dirty *dirty)
2025{
2026 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2027 struct drm_crtc *crtc;
2028 u32 num_units = 0;
2029 u32 i, k;
Thomas Hellstrom1a4b1722015-06-26 02:03:53 -07002030
2031 dirty->dev_priv = dev_priv;
2032
2033 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
2034 if (crtc->primary->fb != &framebuffer->base)
2035 continue;
2036 units[num_units++] = vmw_crtc_to_du(crtc);
2037 }
2038
2039 for (k = 0; k < num_units; k++) {
2040 struct vmw_display_unit *unit = units[k];
2041 s32 crtc_x = unit->crtc.x;
2042 s32 crtc_y = unit->crtc.y;
2043 s32 crtc_width = unit->crtc.mode.hdisplay;
2044 s32 crtc_height = unit->crtc.mode.vdisplay;
2045 const struct drm_clip_rect *clips_ptr = clips;
2046 const struct drm_vmw_rect *vclips_ptr = vclips;
2047
2048 dirty->unit = unit;
2049 if (dirty->fifo_reserve_size > 0) {
2050 dirty->cmd = vmw_fifo_reserve(dev_priv,
2051 dirty->fifo_reserve_size);
2052 if (!dirty->cmd) {
2053 DRM_ERROR("Couldn't reserve fifo space "
2054 "for dirty blits.\n");
Christian Engelmayerf3b8c0c2015-09-19 00:32:24 +02002055 return -ENOMEM;
Thomas Hellstrom1a4b1722015-06-26 02:03:53 -07002056 }
2057 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2058 }
2059 dirty->num_hits = 0;
2060 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2061 vclips_ptr += increment) {
2062 s32 clip_left;
2063 s32 clip_top;
2064
2065 /*
2066 * Select clip array type. Note that integer type
2067 * in @clips is unsigned short, whereas in @vclips
2068 * it's 32-bit.
2069 */
2070 if (clips) {
2071 dirty->fb_x = (s32) clips_ptr->x1;
2072 dirty->fb_y = (s32) clips_ptr->y1;
2073 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2074 crtc_x;
2075 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2076 crtc_y;
2077 } else {
2078 dirty->fb_x = vclips_ptr->x;
2079 dirty->fb_y = vclips_ptr->y;
2080 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2081 dest_x - crtc_x;
2082 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2083 dest_y - crtc_y;
2084 }
2085
2086 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2087 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2088
2089 /* Skip this clip if it's outside the crtc region */
2090 if (dirty->unit_x1 >= crtc_width ||
2091 dirty->unit_y1 >= crtc_height ||
2092 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2093 continue;
2094
2095 /* Clip right and bottom to crtc limits */
2096 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2097 crtc_width);
2098 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2099 crtc_height);
2100
2101 /* Clip left and top to crtc limits */
2102 clip_left = min_t(s32, dirty->unit_x1, 0);
2103 clip_top = min_t(s32, dirty->unit_y1, 0);
2104 dirty->unit_x1 -= clip_left;
2105 dirty->unit_y1 -= clip_top;
2106 dirty->fb_x -= clip_left;
2107 dirty->fb_y -= clip_top;
2108
2109 dirty->clip(dirty);
2110 }
2111
2112 dirty->fifo_commit(dirty);
2113 }
2114
2115 return 0;
2116}
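
/*
 * A minimal caller sketch for the helper above. The callback and command
 * names are hypothetical; the real users live in the screen-object and
 * screen-target code. The caller fills in the closure with per-clip and
 * commit callbacks and lets the helper do the clipping against each crtc:
 *
 *	struct vmw_kms_dirty dirty = { };
 *
 *	dirty.fifo_reserve_size = sizeof(struct my_blit_cmd) * num_clips;
 *	dirty.clip = my_clip_cb;
 *	dirty.fifo_commit = my_commit_cb;
 *	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL,
 *				   0, 0, num_clips, 1, &dirty);
 */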
2117
2118/**
2119 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
2120 * command submission.
2121 *
2122 * @dev_priv: Pointer to a device private structure.
2123 * @buf: The buffer object
2124 * @interruptible: Whether to perform waits as interruptible.
2125 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
2126 * the buffer will be validated as a GMR. Already pinned buffers will not be
2127 * validated.
2128 *
2129 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
2130 * interrupted by a signal.
2131 */
2132int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
2133 struct vmw_dma_buffer *buf,
2134 bool interruptible,
2135 bool validate_as_mob)
2136{
2137 struct ttm_buffer_object *bo = &buf->base;
2138 int ret;
2139
Christian Königdfd5e502016-04-06 11:12:03 +02002140 ttm_bo_reserve(bo, false, false, NULL);
Thomas Hellstrom1a4b1722015-06-26 02:03:53 -07002141 ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
2142 validate_as_mob);
2143 if (ret)
2144 ttm_bo_unreserve(bo);
2145
2146 return ret;
2147}
2148
2149/**
2150 * vmw_kms_helper_buffer_revert - Undo the actions of
2151 * vmw_kms_helper_buffer_prepare.
2152 *
2153 * @buf: Pointer to the buffer object.
2154 *
2155 * Helper to be used if an error forces the caller to undo the actions of
2156 * vmw_kms_helper_buffer_prepare.
2157 */
2158void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
2159{
2160 if (buf)
2161 ttm_bo_unreserve(&buf->base);
2162}
2163
2164/**
2165 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
2166 * kms command submission.
2167 *
2168 * @dev_priv: Pointer to a device private structure.
2169 * @file_priv: Pointer to a struct drm_file representing the caller's
2170 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
2171 * if non-NULL, @user_fence_rep must be non-NULL.
2172 * @buf: The buffer object.
2173 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
2174 * ref-counted fence pointer is returned here.
2175 * @user_fence_rep: Optional pointer to a user-space provided struct
2176 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
2177 * function copies fence data to user-space in a fail-safe manner.
2178 */
2179void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
2180 struct drm_file *file_priv,
2181 struct vmw_dma_buffer *buf,
2182 struct vmw_fence_obj **out_fence,
2183 struct drm_vmw_fence_rep __user *
2184 user_fence_rep)
2185{
2186 struct vmw_fence_obj *fence;
2187 uint32_t handle;
2188 int ret;
2189
2190 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2191 file_priv ? &handle : NULL);
2192 if (buf)
2193 vmw_fence_single_bo(&buf->base, fence);
2194 if (file_priv)
2195 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2196 ret, user_fence_rep, fence,
2197 handle);
2198 if (out_fence)
2199 *out_fence = fence;
2200 else
2201 vmw_fence_obj_unreference(&fence);
2202
2203 vmw_kms_helper_buffer_revert(buf);
2204}
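
/*
 * The buffer helpers above pair up around a command submission, roughly
 * like this (sketch only; build_blit_commands() is a hypothetical stand-in
 * for the driver-specific FIFO command construction):
 *
 *	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
 *	if (ret)
 *		return ret;
 *
 *	ret = build_blit_commands(dev_priv, buf);
 *	if (ret) {
 *		vmw_kms_helper_buffer_revert(buf);
 *		return ret;
 *	}
 *
 *	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
 *				     user_fence_rep);
 */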
2205
2206
2207/**
2208 * vmw_kms_helper_resource_revert - Undo the actions of
2209 * vmw_kms_helper_resource_prepare.
2210 *
2211 * @res: Pointer to the resource. Typically a surface.
2212 *
2213 * Helper to be used if an error forces the caller to undo the actions of
2214 * vmw_kms_helper_resource_prepare.
2215 */
2216void vmw_kms_helper_resource_revert(struct vmw_resource *res)
2217{
2218 vmw_kms_helper_buffer_revert(res->backup);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002219 vmw_resource_unreserve(res, false, NULL, 0);
Thomas Hellstrom1a4b1722015-06-26 02:03:53 -07002220 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2221}
2222
2223/**
2224 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
2225 * command submission.
2226 *
2227 * @res: Pointer to the resource. Typically a surface.
2228 * @interruptible: Whether to perform waits as interruptible.
2229 *
2230 * Reserves and validates also the backup buffer if a guest-backed resource.
2231 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
2232 * interrupted by a signal.
2233 */
2234int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
2235 bool interruptible)
2236{
2237 int ret = 0;
2238
2239 if (interruptible)
2240 ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
2241 else
2242 mutex_lock(&res->dev_priv->cmdbuf_mutex);
2243
2244 if (unlikely(ret != 0))
2245 return -ERESTARTSYS;
2246
2247 ret = vmw_resource_reserve(res, interruptible, false);
2248 if (ret)
2249 goto out_unlock;
2250
2251 if (res->backup) {
2252 ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
2253 interruptible,
2254 res->dev_priv->has_mob);
2255 if (ret)
2256 goto out_unreserve;
2257 }
2258 ret = vmw_resource_validate(res);
2259 if (ret)
2260 goto out_revert;
2261 return 0;
2262
2263out_revert:
2264 vmw_kms_helper_buffer_revert(res->backup);
2265out_unreserve:
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002266 vmw_resource_unreserve(res, false, NULL, 0);
Thomas Hellstrom1a4b1722015-06-26 02:03:53 -07002267out_unlock:
2268 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2269 return ret;
2270}
2271
2272/**
2273 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
2274 * kms command submission.
2275 *
2276 * @res: Pointer to the resource. Typically a surface.
2277 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
2278 * ref-counted fence pointer is returned here.
2279 */
2280void vmw_kms_helper_resource_finish(struct vmw_resource *res,
2281 struct vmw_fence_obj **out_fence)
2282{
2283 if (res->backup || out_fence)
2284 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
2285 out_fence, NULL);
2286
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002287 vmw_resource_unreserve(res, false, NULL, 0);
Thomas Hellstrom1a4b1722015-06-26 02:03:53 -07002288 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2289}
Thomas Hellstrom6bf6bf02015-06-26 02:22:40 -07002290
2291/**
2292 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2293 * its backing MOB.
2294 *
2295 * @res: Pointer to the surface resource
2296 * @clips: Clip rects in framebuffer (surface) space.
2297 * @num_clips: Number of clips in @clips.
2298 * @increment: Integer with which to increment the clip counter when looping.
2299 * Used to skip a predetermined number of clip rects.
2300 *
2301 * This function makes sure the proxy surface is updated from its backing MOB
2302 * using the region given by @clips. The surface resource @res and its backing
2303 * MOB need to be reserved and validated on call.
2304 */
2305int vmw_kms_update_proxy(struct vmw_resource *res,
2306 const struct drm_clip_rect *clips,
2307 unsigned num_clips,
2308 int increment)
2309{
2310 struct vmw_private *dev_priv = res->dev_priv;
2311 struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
2312 struct {
2313 SVGA3dCmdHeader header;
2314 SVGA3dCmdUpdateGBImage body;
2315 } *cmd;
2316 SVGA3dBox *box;
2317 size_t copy_size = 0;
2318 int i;
2319
2320 if (!clips)
2321 return 0;
2322
2323 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
2324 if (!cmd) {
2325 DRM_ERROR("Couldn't reserve fifo space for proxy surface "
2326 "update.\n");
2327 return -ENOMEM;
2328 }
2329
2330 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2331 box = &cmd->body.box;
2332
2333 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2334 cmd->header.size = sizeof(cmd->body);
2335 cmd->body.image.sid = res->id;
2336 cmd->body.image.face = 0;
2337 cmd->body.image.mipmap = 0;
2338
2339 if (clips->x1 > size->width || clips->x2 > size->width ||
2340 clips->y1 > size->height || clips->y2 > size->height) {
2341 DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2342 return -EINVAL;
2343 }
2344
2345 box->x = clips->x1;
2346 box->y = clips->y1;
2347 box->z = 0;
2348 box->w = clips->x2 - clips->x1;
2349 box->h = clips->y2 - clips->y1;
2350 box->d = 1;
2351
2352 copy_size += sizeof(*cmd);
2353 }
2354
2355 vmw_fifo_commit(dev_priv, copy_size);
2356
2357 return 0;
2358}
Thomas Hellstroma2787242015-06-29 12:55:07 -07002359
2360int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2361 unsigned unit,
2362 u32 max_width,
2363 u32 max_height,
2364 struct drm_connector **p_con,
2365 struct drm_crtc **p_crtc,
2366 struct drm_display_mode **p_mode)
2367{
2368 struct drm_connector *con;
2369 struct vmw_display_unit *du;
2370 struct drm_display_mode *mode;
2371 int i = 0;
2372
2373 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
2374 head) {
2375 if (i == unit)
2376 break;
2377
2378 ++i;
2379 }
2380
2381 if (i != unit) {
2382 DRM_ERROR("Could not find initial display unit.\n");
2383 return -EINVAL;
2384 }
2385
2386 if (list_empty(&con->modes))
2387 (void) vmw_du_connector_fill_modes(con, max_width, max_height);
2388
2389 if (list_empty(&con->modes)) {
2390 DRM_ERROR("Could not find initial display mode.\n");
2391 return -EINVAL;
2392 }
2393
2394 du = vmw_connector_to_du(con);
2395 *p_con = con;
2396 *p_crtc = &du->crtc;
2397
2398 list_for_each_entry(mode, &con->modes, head) {
2399 if (mode->type & DRM_MODE_TYPE_PREFERRED)
2400 break;
2401 }
2402
2403 if (mode->type & DRM_MODE_TYPE_PREFERRED)
2404 *p_mode = mode;
2405 else {
2406 WARN_ONCE(true, "Could not find initial preferred mode.\n");
2407 *p_mode = list_first_entry(&con->modes,
2408 struct drm_display_mode,
2409 head);
2410 }
2411
2412 return 0;
2413}
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002414
2415/**
2416 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
2417 *
2418 * @dev_priv: Pointer to a device private struct.
2419 * @du: The display unit of the crtc.
2420 */
2421void vmw_kms_del_active(struct vmw_private *dev_priv,
2422 struct vmw_display_unit *du)
2423{
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002424 mutex_lock(&dev_priv->global_kms_state_mutex);
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002425 if (du->active_implicit) {
2426 if (--(dev_priv->num_implicit) == 0)
2427 dev_priv->implicit_fb = NULL;
2428 du->active_implicit = false;
2429 }
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002430 mutex_unlock(&dev_priv->global_kms_state_mutex);
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002431}
2432
2433/**
2434 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
2435 *
2436 * @dev_priv: Pointer to a device private struct.
2437 * @du: The display unit of the crtc.
2438 * @vfb: The implicit framebuffer
2439 *
2440 * Registers a binding to an implicit framebuffer.
2441 */
2442void vmw_kms_add_active(struct vmw_private *dev_priv,
2443 struct vmw_display_unit *du,
2444 struct vmw_framebuffer *vfb)
2445{
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002446 mutex_lock(&dev_priv->global_kms_state_mutex);
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002447 WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
2448
2449 if (!du->active_implicit && du->is_implicit) {
2450 dev_priv->implicit_fb = vfb;
2451 du->active_implicit = true;
2452 dev_priv->num_implicit++;
2453 }
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002454 mutex_unlock(&dev_priv->global_kms_state_mutex);
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002455}
2456
2457/**
2458 * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
2459 *
2460 * @dev_priv: Pointer to device-private struct.
2461 * @crtc: The crtc we want to flip.
2462 *
2463 * Returns true or false depending on whether it's OK to flip this crtc
2464 * based on the criterion that we must not have more than one implicit
2465 * frame-buffer at any one time.
2466 */
2467bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
2468 struct drm_crtc *crtc)
2469{
2470 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002471 bool ret;
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002472
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002473 mutex_lock(&dev_priv->global_kms_state_mutex);
2474 ret = !du->is_implicit || dev_priv->num_implicit == 1;
2475 mutex_unlock(&dev_priv->global_kms_state_mutex);
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002476
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002477 return ret;
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002478}
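
/*
 * Page-flip paths are expected to gate on the check above before queuing a
 * flip, along the lines of (sketch):
 *
 *	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
 *		return -EINVAL;
 */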
2479
2480/**
2481 * vmw_kms_update_implicit_fb - Update the implicit fb.
2482 *
2483 * @dev_priv: Pointer to device-private struct.
2484 * @crtc: The crtc the new implicit frame-buffer is bound to.
2485 */
2486void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
2487 struct drm_crtc *crtc)
2488{
2489 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2490 struct vmw_framebuffer *vfb;
2491
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002492 mutex_lock(&dev_priv->global_kms_state_mutex);
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002493
2494 if (!du->is_implicit)
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002495 goto out_unlock;
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002496
2497 vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
2498 WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
2499 dev_priv->implicit_fb != vfb);
2500
2501 dev_priv->implicit_fb = vfb;
Thomas Hellstrom93cd1682016-05-03 11:24:35 +02002502out_unlock:
2503 mutex_unlock(&dev_priv->global_kms_state_mutex);
Thomas Hellstrom75c06852016-02-12 09:00:26 +01002504}
Thomas Hellstrom76404ac2016-02-12 09:55:45 +01002505
2506/**
2507 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2508 * property.
2509 *
2510 * @dev_priv: Pointer to a device private struct.
2511 * @immutable: Whether the property is immutable.
2512 *
2513 * Sets up the implicit placement property unless it's already set up.
2514 */
2515void
2516vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
2517 bool immutable)
2518{
2519 if (dev_priv->implicit_placement_property)
2520 return;
2521
2522 dev_priv->implicit_placement_property =
2523 drm_property_create_range(dev_priv->dev,
2524 immutable ?
2525 DRM_MODE_PROP_IMMUTABLE : 0,
2526 "implicit_placement", 0, 1);
2527
2528}
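
/*
 * Display unit init code is then expected to attach the property to each
 * connector, roughly as follows (sketch, assuming a struct
 * vmw_display_unit *du):
 *
 *	drm_object_attach_property(&du->connector.base,
 *				   dev_priv->implicit_placement_property,
 *				   du->is_implicit);
 */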