/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>


#define vmw_crtc_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
        struct vmw_kms_dirty base;
        s32 left, right, top, bottom;
        s32 dst_x, dst_y;
        u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
        uint32 header;
        SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
        uint32 header;
        SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
        SVGA3dCmdHeader header;
        SVGA3dCmdBlitSurfaceToScreen body;
};

/**
 * struct vmw_screen_object_unit - Display unit using screen objects.
 */
struct vmw_screen_object_unit {
        struct vmw_display_unit base;

        unsigned long buffer_size; /**< Size of allocated buffer */
        struct vmw_dma_buffer *buffer; /**< Backing store buffer */

        bool defined; /**< Whether the screen object is defined in the device */
};

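/**
 * vmw_sou_destroy - Clean up and free a screen object display unit.
 *
 * @sou: The display unit to destroy.
 */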
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
        vmw_du_cleanup(&sou->base);
        kfree(sou);
}


/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
        vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
                               struct vmw_screen_object_unit *sou,
                               uint32_t x, uint32_t y,
                               struct drm_display_mode *mode)
{
        size_t fifo_size;

        struct {
                struct {
                        uint32_t cmdType;
                } header;
                SVGAScreenObject obj;
        } *cmd;

        BUG_ON(!sou->buffer);

        fifo_size = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, fifo_size);
        /* The hardware has hung, nothing we can do about it here. */
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, fifo_size);
        cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
        cmd->obj.structSize = sizeof(SVGAScreenObject);
        cmd->obj.id = sou->base.unit;
        cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
                (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
        cmd->obj.size.width = mode->hdisplay;
        cmd->obj.size.height = mode->vdisplay;
        if (sou->base.is_implicit) {
                cmd->obj.root.x = x;
                cmd->obj.root.y = y;
        } else {
                cmd->obj.root.x = sou->base.gui_x;
                cmd->obj.root.y = sou->base.gui_y;
        }
        sou->base.set_gui_x = cmd->obj.root.x;
        sou->base.set_gui_y = cmd->obj.root.y;

        /* Ok to assume that buffer is pinned in vram */
        vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
        cmd->obj.backingStore.pitch = mode->hdisplay * 4;

        vmw_fifo_commit(dev_priv, fifo_size);

        sou->defined = true;

        return 0;
}

/**
 * Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
                                struct vmw_screen_object_unit *sou)
{
        size_t fifo_size;
        int ret;

        struct {
                struct {
                        uint32_t cmdType;
                } header;
                SVGAFifoCmdDestroyScreen body;
        } *cmd;

        /* no need to do anything */
        if (unlikely(!sou->defined))
                return 0;

        fifo_size = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, fifo_size);
        /* the hardware has hung, nothing we can do about it here */
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, fifo_size);
        cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
        cmd->body.screenId = sou->base.unit;

        vmw_fifo_commit(dev_priv, fifo_size);

        /* Force sync */
        ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
        if (unlikely(ret != 0))
                DRM_ERROR("Failed to sync with HW");
        else
                sou->defined = false;

        return ret;
}

/**
 * Free the backing store.
 */
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
                                 struct vmw_screen_object_unit *sou)
{
        vmw_dmabuf_unreference(&sou->buffer);
        sou->buffer_size = 0;
}

/**
 * Allocate the backing store for the buffer.
 */
static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
                                 struct vmw_screen_object_unit *sou,
                                 unsigned long size)
{
        int ret;

        if (sou->buffer_size == size)
                return 0;

        if (sou->buffer)
                vmw_sou_backing_free(dev_priv, sou);

        sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
        if (unlikely(sou->buffer == NULL))
                return -ENOMEM;

        /* After we have allocated the backing store, we might not be able
         * to resume the overlays; this is preferred to failing the
         * allocation.
         */
        vmw_overlay_pause_all(dev_priv);
        ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
                              &vmw_vram_ne_placement,
                              false, &vmw_dmabuf_bo_free);
        vmw_overlay_resume_all(dev_priv);

        if (unlikely(ret != 0))
                sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
        else
                sou->buffer_size = size;

        return ret;
}

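/**
 * vmw_sou_crtc_set_config - The crtc .set_config callback.
 *
 * @set: The requested crtc, connector, mode and framebuffer configuration.
 *
 * Turns the crtc off when no mode, framebuffer or connector is given;
 * otherwise (re)defines the screen object and its VRAM backing store to
 * match the requested mode.
 */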
static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        struct drm_connector *connector;
        struct drm_display_mode *mode;
        struct drm_encoder *encoder;
        struct vmw_framebuffer *vfb;
        struct drm_framebuffer *fb;
        struct drm_crtc *crtc;
        int ret = 0;

        if (!set)
                return -EINVAL;

        if (!set->crtc)
                return -EINVAL;

        /* get the sou */
        crtc = set->crtc;
        sou = vmw_crtc_to_sou(crtc);
        vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
        dev_priv = vmw_priv(crtc->dev);

        if (set->num_connectors > 1) {
                DRM_ERROR("Too many connectors\n");
                return -EINVAL;
        }

        if (set->num_connectors == 1 &&
            set->connectors[0] != &sou->base.connector) {
                DRM_ERROR("Connector doesn't match %p %p\n",
                          set->connectors[0], &sou->base.connector);
                return -EINVAL;
        }

        /* Only one active implicit frame-buffer at a time. */
        mutex_lock(&dev_priv->global_kms_state_mutex);
        if (sou->base.is_implicit &&
            dev_priv->implicit_fb && vfb &&
            !(dev_priv->num_implicit == 1 &&
              sou->base.active_implicit) &&
            dev_priv->implicit_fb != vfb) {
                mutex_unlock(&dev_priv->global_kms_state_mutex);
                DRM_ERROR("Multiple implicit framebuffers not supported.\n");
                return -EINVAL;
        }
        mutex_unlock(&dev_priv->global_kms_state_mutex);

        /* since they always map one to one these are safe */
        connector = &sou->base.connector;
        encoder = &sou->base.encoder;

        /* should we turn the crtc off */
        if (set->num_connectors == 0 || !set->mode || !set->fb) {
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                /* the hardware has hung, don't do anything more */
                if (unlikely(ret != 0))
                        return ret;

                connector->encoder = NULL;
                encoder->crtc = NULL;
                crtc->primary->fb = NULL;
                crtc->x = 0;
                crtc->y = 0;
                crtc->enabled = false;

                vmw_kms_del_active(dev_priv, &sou->base);

                vmw_sou_backing_free(dev_priv, sou);

                return 0;
        }


        /* we now know we want to set a mode */
        mode = set->mode;
        fb = set->fb;

        if (set->x + mode->hdisplay > fb->width ||
            set->y + mode->vdisplay > fb->height) {
                DRM_ERROR("set outside of framebuffer\n");
                return -EINVAL;
        }

        vmw_svga_enable(dev_priv);

        if (mode->hdisplay != crtc->mode.hdisplay ||
            mode->vdisplay != crtc->mode.vdisplay) {
                /* no need to check if depth is different, because backing
                 * store bytes-per-pixel is forced to 4 by the device.
                 */

                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                /* the hardware has hung, don't do anything more */
                if (unlikely(ret != 0))
                        return ret;

                vmw_sou_backing_free(dev_priv, sou);
        }

        if (!sou->buffer) {
                /* forced to 4 bytes per pixel by the device */
                size_t size = mode->hdisplay * mode->vdisplay * 4;
                ret = vmw_sou_backing_alloc(dev_priv, sou, size);
                if (unlikely(ret != 0))
                        return ret;
        }

        ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
        if (unlikely(ret != 0)) {
                /*
                 * We are in a bit of a situation here: the hardware has
                 * hung and we may or may not have a buffer hanging off
                 * the screen object. The best thing to do is nothing if
                 * the screen was already defined; if not, just turn the
                 * crtc off. Not what userspace wants, but so be it.
                 */
                if (sou->defined)
                        return ret;

                connector->encoder = NULL;
                encoder->crtc = NULL;
                crtc->primary->fb = NULL;
                crtc->x = 0;
                crtc->y = 0;
                crtc->enabled = false;

                return ret;
        }

        vmw_kms_add_active(dev_priv, &sou->base, vfb);

        connector->encoder = encoder;
        encoder->crtc = crtc;
        crtc->mode = *mode;
        crtc->primary->fb = fb;
        crtc->x = set->x;
        crtc->y = set->y;
        crtc->enabled = true;

        return 0;
}

static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
                                  struct drm_framebuffer *fb,
                                  struct drm_pending_vblank_event *event,
                                  uint32_t flags)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
        struct vmw_fence_obj *fence = NULL;
        struct drm_vmw_rect vclips;
        int ret;

        if (!vmw_kms_crtc_flippable(dev_priv, crtc))
                return -EINVAL;

        crtc->primary->fb = fb;

        /* do a full screen dirty update */
        vclips.x = crtc->x;
        vclips.y = crtc->y;
        vclips.w = crtc->mode.hdisplay;
        vclips.h = crtc->mode.vdisplay;

        if (vfb->dmabuf)
                ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
                                                  NULL, &vclips, 1, 1,
                                                  true, &fence);
        else
                ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
                                                   NULL, &vclips, NULL,
                                                   0, 0, 1, 1, &fence);


        if (ret != 0)
                goto out_no_fence;
        if (!fence) {
                ret = -EINVAL;
                goto out_no_fence;
        }

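        /* Deliver the page-flip event to user-space when the fence signals. */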
        if (event) {
                struct drm_file *file_priv = event->base.file_priv;

                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   &event->event.tv_sec,
                                                   &event->event.tv_usec,
                                                   true);
        }

        /*
         * No need to hold on to this now. The only cleanup
         * we need to do if we fail is unref the fence.
         */
        vmw_fence_obj_unreference(&fence);

        if (vmw_crtc_to_du(crtc)->is_implicit)
                vmw_kms_update_implicit_fb(dev_priv, crtc);

        return ret;

out_no_fence:
        crtc->primary->fb = old_fb;
        return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
        .cursor_set2 = vmw_du_crtc_cursor_set2,
        .cursor_move = vmw_du_crtc_cursor_move,
        .gamma_set = vmw_du_crtc_gamma_set,
        .destroy = vmw_sou_crtc_destroy,
        .set_config = vmw_sou_crtc_set_config,
        .page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
        vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
        .destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
        vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
        .dpms = vmw_du_connector_dpms,
        .detect = vmw_du_connector_detect,
        .fill_modes = vmw_du_connector_fill_modes,
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_sou_connector_destroy,
};

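/**
 * vmw_sou_init - Set up a single screen object display unit.
 *
 * @dev_priv: Pointer to the device private structure.
 * @unit: The unit (crtc) number.
 *
 * Allocates the display unit and registers its connector, encoder and
 * crtc with the DRM core, attaching the standard vmwgfx display
 * properties.
 */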
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
        struct vmw_screen_object_unit *sou;
        struct drm_device *dev = dev_priv->dev;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
        struct drm_crtc *crtc;

        sou = kzalloc(sizeof(*sou), GFP_KERNEL);
        if (!sou)
                return -ENOMEM;

        sou->base.unit = unit;
        crtc = &sou->base.crtc;
        encoder = &sou->base.encoder;
        connector = &sou->base.connector;

        sou->base.active_implicit = false;
        sou->base.pref_active = (unit == 0);
        sou->base.pref_width = dev_priv->initial_width;
        sou->base.pref_height = dev_priv->initial_height;
        sou->base.pref_mode = NULL;
        sou->base.is_implicit = false;

        drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
                           DRM_MODE_CONNECTOR_VIRTUAL);
        connector->status = vmw_du_connector_detect(connector, true);

        drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
                         DRM_MODE_ENCODER_VIRTUAL, NULL);
        drm_mode_connector_attach_encoder(connector, encoder);
        encoder->possible_crtcs = (1 << unit);
        encoder->possible_clones = 0;

        (void) drm_connector_register(connector);

        drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);

        drm_mode_crtc_set_gamma_size(crtc, 256);

        drm_object_attach_property(&connector->base,
                                   dev->mode_config.dirty_info_property,
                                   1);
        drm_object_attach_property(&connector->base,
                                   dev_priv->hotplug_mode_update_property, 1);
        drm_object_attach_property(&connector->base,
                                   dev->mode_config.suggested_x_property, 0);
        drm_object_attach_property(&connector->base,
                                   dev->mode_config.suggested_y_property, 0);
        if (dev_priv->implicit_placement_property)
                drm_object_attach_property
                        (&connector->base,
                         dev_priv->implicit_placement_property,
                         sou->base.is_implicit);

        return 0;
}

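/**
 * vmw_kms_sou_init_display - Initialize screen object display units.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns -ENOSYS if the device lacks the SCREEN_OBJECT_2 capability;
 * otherwise sets up vblank handling, the display properties and one
 * display unit per supported output.
 */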
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int i, ret;

        if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
                DRM_INFO("Not using screen objects,"
                         " missing cap SCREEN_OBJECT_2\n");
                return -ENOSYS;
        }

        ret = -ENOMEM;
        dev_priv->num_implicit = 0;
        dev_priv->implicit_fb = NULL;

        ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
        if (unlikely(ret != 0))
                return ret;

        ret = drm_mode_create_dirty_info_property(dev);
        if (unlikely(ret != 0))
                goto err_vblank_cleanup;

        vmw_kms_create_implicit_placement_property(dev_priv, false);

        for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
                vmw_sou_init(dev_priv, i);

        dev_priv->active_display_unit = vmw_du_screen_object;

        DRM_INFO("Screen Objects Display Unit initialized\n");

        return 0;

err_vblank_cleanup:
        drm_vblank_cleanup(dev);
        return ret;
}

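/**
 * vmw_kms_sou_close_display - Tear down screen object display support.
 *
 * @dev_priv: Pointer to the device private structure.
 */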
int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        drm_vblank_cleanup(dev);

        return 0;
}

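/**
 * do_dmabuf_define_gmrfb - Emit a DEFINE_GMRFB command for a framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: The dma-buffer backed framebuffer.
 *
 * Points the device GMRFB at the framebuffer's backing buffer so that
 * subsequent GMRFB blit commands operate on its contents.
 */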
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
                                  struct vmw_framebuffer *framebuffer)
{
        struct vmw_dma_buffer *buf =
                container_of(framebuffer, struct vmw_framebuffer_dmabuf,
                             base)->buffer;
        int depth = framebuffer->base.depth;
        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd;

        /* Emulate RGBA support: contrary to svga_reg.h, 32-bit color
         * depth is not supported by hosts. This is only a problem if we
         * later read this value back and expect what we uploaded.
         */
        if (depth == 32)
                depth = 24;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (!cmd) {
                DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
                return -ENOMEM;
        }

        cmd->header = SVGA_CMD_DEFINE_GMRFB;
        cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
        cmd->body.format.colorDepth = depth;
        cmd->body.format.reserved = 0;
        cmd->body.bytesPerLine = framebuffer->base.pitches[0];
        /* Buffer is reserved in vram or GMR */
        vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to match the encoded destination bounding box.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_surface_dirty *sdirty =
                container_of(dirty, typeof(*sdirty), base);
        struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
        s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
        s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
        size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
        SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
        int i;

        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
        cmd->header.size = sizeof(cmd->body) + region_size;

        /*
         * Use the destination bounding box to specify the destination and
         * source bounding regions.
         */
        cmd->body.destRect.left = sdirty->left;
        cmd->body.destRect.right = sdirty->right;
        cmd->body.destRect.top = sdirty->top;
        cmd->body.destRect.bottom = sdirty->bottom;

        cmd->body.srcRect.left = sdirty->left + trans_x;
        cmd->body.srcRect.right = sdirty->right + trans_x;
        cmd->body.srcRect.top = sdirty->top + trans_y;
        cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

        cmd->body.srcImage.sid = sdirty->sid;
        cmd->body.destScreenId = dirty->unit->unit;

        /* Blits are relative to the destination rect. Translate. */
        for (i = 0; i < dirty->num_hits; ++i, ++blit) {
                blit->left -= sdirty->left;
                blit->right -= sdirty->left;
                blit->top -= sdirty->top;
                blit->bottom -= sdirty->top;
        }

        vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

        sdirty->left = sdirty->top = S32_MAX;
        sdirty->right = sdirty->bottom = S32_MIN;
}

/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_surface_dirty *sdirty =
                container_of(dirty, typeof(*sdirty), base);
        struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
        SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

        /* Destination rect. */
        blit += dirty->num_hits;
        blit->left = dirty->unit_x1;
        blit->top = dirty->unit_y1;
        blit->right = dirty->unit_x2;
        blit->bottom = dirty->unit_y2;

        /* Destination bounding box */
        sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
        sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
        sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
        sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

        dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
                                 struct vmw_framebuffer *framebuffer,
                                 struct drm_clip_rect *clips,
                                 struct drm_vmw_rect *vclips,
                                 struct vmw_resource *srf,
                                 s32 dest_x,
                                 s32 dest_y,
                                 unsigned num_clips, int inc,
                                 struct vmw_fence_obj **out_fence)
{
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_kms_sou_surface_dirty sdirty;
        int ret;

        if (!srf)
                srf = &vfbs->surface->res;

        ret = vmw_kms_helper_resource_prepare(srf, true);
        if (ret)
                return ret;

        sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
        sdirty.base.clip = vmw_sou_surface_clip;
        sdirty.base.dev_priv = dev_priv;
        sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
                sizeof(SVGASignedRect) * num_clips;

        sdirty.sid = srf->id;
        sdirty.left = sdirty.top = S32_MAX;
        sdirty.right = sdirty.bottom = S32_MIN;
        sdirty.dst_x = dest_x;
        sdirty.dst_y = dest_y;

        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);
        vmw_kms_helper_resource_finish(srf, out_fence);

        return ret;
}

/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of GMRFB-to-screen
 * blit clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of GMRFB-to-screen blit clips.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct vmw_kms_sou_dmabuf_blit) *
                        dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a GMRFB-to-screen blit cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
        blit->body.destScreenId = dirty->unit->unit;
        blit->body.srcOrigin.x = dirty->fb_x;
        blit->body.srcOrigin.y = dirty->fb_y;
        blit->body.destRect.left = dirty->unit_x1;
        blit->body.destRect.top = dirty->unit_y1;
        blit->body.destRect.right = dirty->unit_x2;
        blit->body.destRect.bottom = dirty->unit_y2;
        dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
                                struct vmw_framebuffer *framebuffer,
                                struct drm_clip_rect *clips,
                                struct drm_vmw_rect *vclips,
                                unsigned num_clips, int increment,
                                bool interruptible,
                                struct vmw_fence_obj **out_fence)
{
        struct vmw_dma_buffer *buf =
                container_of(framebuffer, struct vmw_framebuffer_dmabuf,
                             base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
                                            false);
        if (ret)
                return ret;

        ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
        if (unlikely(ret != 0))
                goto out_revert;

        dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
        dirty.clip = vmw_sou_dmabuf_clip;
        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   0, 0, num_clips, increment, &dirty);
        vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

        return ret;

out_revert:
        vmw_kms_helper_buffer_revert(buf);

        return ret;
}


/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct vmw_kms_sou_readback_blit) *
                        dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
        blit->body.srcScreenId = dirty->unit->unit;
        blit->body.destOrigin.x = dirty->fb_x;
        blit->body.destOrigin.y = dirty->fb_y;
        blit->body.srcRect.left = dirty->unit_x1;
        blit->body.srcRect.top = dirty->unit_y1;
        blit->body.srcRect.right = dirty->unit_x2;
        blit->body.srcRect.bottom = dirty->unit_y2;
        dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                         struct drm_file *file_priv,
                         struct vmw_framebuffer *vfb,
                         struct drm_vmw_fence_rep __user *user_fence_rep,
                         struct drm_vmw_rect *vclips,
                         uint32_t num_clips)
{
        struct vmw_dma_buffer *buf =
                container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
        if (ret)
                return ret;

        ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
        if (unlikely(ret != 0))
                goto out_revert;

        dirty.fifo_commit = vmw_sou_readback_fifo_commit;
        dirty.clip = vmw_sou_readback_clip;
        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
                                   0, 0, num_clips, 1, &dirty);
        vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
                                     user_fence_rep);

        return ret;

out_revert:
        vmw_kms_helper_buffer_revert(buf);

        return ret;
}