/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

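/*
 * Command verifier for user-space command submission (execbuf).
 *
 * Each SVGA command in a submitted stream is run through a per-command
 * check function below. The checkers validate user-space handles, patch
 * them in place with device ids, and collect the buffer objects and
 * resources that must be reserved and validated before the stream is
 * copied into the device fifo.
 */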
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

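/**
 * vmw_resource_to_validate_list - add a resource to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @p_res: Pointer to a reference-counted resource pointer.
 *
 * If the resource is not already on the list, the list takes over the
 * reference held through @p_res. If it is already on the list, or if
 * the list is full, the reference is dropped instead.
 */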
static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
					 struct vmw_resource **p_res)
{
	int ret = 0;
	struct vmw_resource *res = *p_res;

	if (!res->on_validate_list) {
		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
			DRM_ERROR("Too many resources referenced in "
				  "command stream.\n");
			ret = -ENOMEM;
			goto out;
		}
		sw_context->resources[sw_context->num_ref_resources++] = res;
		res->on_validate_list = true;
		return 0;
	}

out:
	vmw_resource_unreference(p_res);
	return ret;
}

static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;

	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
				&ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;
	return vmw_resource_to_validate_list(sw_context, &ctx);
}

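/**
 * vmw_cmd_sid_check - validate and translate a surface id in place
 *
 * Replaces the user-space surface handle at @sid with the device
 * surface id, caching the most recent translation to short-circuit
 * repeated lookups, and puts the surface on the validation list.
 * SVGA3D_INVALID_ID is passed through unmodified.
 */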
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	struct vmw_surface *srf;
	int ret;
	struct vmw_resource *res;

	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (likely((sw_context->sid_valid &&
		    *sid == sw_context->last_sid))) {
		*sid = sw_context->sid_translation;
		return 0;
	}

	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     *sid, &srf);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use surface 0x%08x "
			  "address 0x%08lx\n",
			  (unsigned int) *sid,
			  (unsigned long) sid);
		return ret;
	}

	sw_context->last_sid = *sid;
	sw_context->sid_valid = true;
	sw_context->sid_translation = srf->res.id;
	*sid = sw_context->sid_translation;

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

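/**
 * vmw_translate_guest_ptr - look up the DMA buffer backing a guest pointer
 *
 * Looks up the DMA buffer identified by @ptr->gmrId, records a
 * relocation so the pointer can be patched with the buffer's final
 * placement after validation, and adds the buffer to the validation
 * list. On success the caller receives a reference in @vmw_bo_p and
 * is responsible for dropping it.
 */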
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->usage = TTM_USAGE_READWRITE;
		val_buf->new_sync_obj_arg = (void *) DRM_VMW_FENCE_FLAG_EXEC;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

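/*
 * The query commands carry a guest pointer to the buffer receiving the
 * query result; the end- and wait-for-query checkers below only need to
 * translate that pointer so the backing buffer gets validated.
 */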
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

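/**
 * vmw_cmd_dma - validate an SVGA3D_CMD_SURFACE_DMA command
 *
 * Translates the guest pointer to the backing DMA buffer, patches the
 * command with the device surface id, gives kms a chance to snoop
 * cursor surface contents, and adds the surface to the validation list.
 */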
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	struct vmw_resource *res;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */
	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);

	vmw_dmabuf_unreference(&vmw_bo);

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

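/**
 * vmw_cmd_draw - validate an SVGA3D_CMD_DRAW_PRIMITIVES command
 *
 * Bounds-checks the variable-length vertex declaration and primitive
 * range arrays against the command size, and validates the surface id
 * embedded in each entry.
 */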
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

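/**
 * vmw_cmd_tex_state - validate an SVGA3D_CMD_SETTEXTURESTATE command
 *
 * Walks the variable-length texture state list and validates the
 * surface id carried by every SVGA3D_TS_BIND_TEXTURE entry.
 */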
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

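/*
 * 2D SVGA commands do not carry a size in a header; look it up in the
 * switch below, check it against the remaining buffer space, and
 * refuse kernel-only commands from streams that did not originate in
 * the kernel.
 */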
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	bool need_kernel = true;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		need_kernel = false;
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(need_kernel && !sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

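/*
 * Check a single command: 2D commands go to the non-3D checker above;
 * 3D commands are size-checked and then dispatched through
 * vmw_cmd_funcs, indexed by cmd_id - SVGA_3D_CMD_BASE.
 */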
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

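/*
 * After validation has placed every buffer, patch the recorded guest
 * pointers: a buffer in VRAM is addressed through the framebuffer GMR
 * at its final offset, any other buffer through its assigned GMR id.
 */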
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;
	uint32_t i = sw_context->num_ref_resources;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	/*
	 * Drop references to resources held during command submission.
	 */
	while (i-- > 0) {
		sw_context->resources[i]->on_validate_list = false;
		vmw_resource_unreference(&sw_context->resources[i]);
	}
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

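/*
 * Grow the per-device command bounce buffer to at least @size bytes.
 * The buffer grows in page-aligned steps of roughly 1.5x to limit the
 * number of reallocations; old contents are discarded.
 */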
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created for the fence; otherwise no handle is
 * created.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

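/*
 * Process a command stream end to end: copy it into the kernel bounce
 * buffer unless it already originates in the kernel, verify every
 * command, reserve and validate all referenced buffers, apply
 * relocations, optionally throttle on the marker queue, copy the
 * stream to the fifo and fence the submission.
 */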
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct drm_vmw_fence_rep fence_rep;
	struct vmw_fence_obj *fence;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->num_ref_resources = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_throttle;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_throttle;
	}

	memcpy(cmd, kernel_commands, command_size);
	vmw_fifo_commit(dev_priv, command_size);

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *) fence);

	vmw_clear_validations(sw_context);

	if (user_fence_rep) {
		fence_rep.error = ret;
		fence_rep.handle = handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;

		/*
		 * copy_to_user errors will be detected by user space not
		 * seeing fence_rep::error filled in. Typically
		 * user-space would have pre-set that member to -EFAULT.
		 */
		ret = copy_to_user(user_fence_rep, &fence_rep,
				   sizeof(fence_rep));

		/*
		 * User-space lost the fence object. We need to sync
		 * and unreference the handle.
		 */
		if (unlikely(ret != 0) && (fence_rep.error == 0)) {
			BUG_ON(fence == NULL);

			ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
						  handle, TTM_REF_USAGE);
			DRM_ERROR("Fence copy error. Syncing.\n");
			(void) vmw_fence_obj_wait(fence,
						  fence->signal_mask,
						  false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return 0;

out_err:
	vmw_free_relocations(sw_context);
out_throttle:
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return ret;
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}