/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

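/**
 * Per-command verifier callbacks. vmw_cmd_invalid handles commands that
 * user space may not issue directly; vmw_cmd_ok handles commands that
 * need no further checking.
 */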
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

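/**
 * Check that the context id referenced by a command is valid for the
 * calling client. The last successfully checked id is cached in the
 * software context to avoid repeated lookups.
 */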
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return 0;
}

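/**
 * Check a surface id and patch it in place with the device surface id.
 * The last checked id and its translation are cached in the software
 * context to avoid repeated lookups.
 */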
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (unlikely((!sw_context->sid_valid ||
		      *sid != sw_context->last_sid))) {
		int real_id;
		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
					    *sid, &real_id);

		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find or use surface 0x%08x "
89 "address 0x%08lx\n",
90 (unsigned int) *sid,
91 (unsigned long) sid);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +000092 return ret;
93 }
94
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +010095 sw_context->last_sid = *sid;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +000096 sw_context->sid_valid = true;
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +010097 *sid = real_id;
98 sw_context->sid_translation = real_id;
99 } else
100 *sid = sw_context->sid_translation;
101
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +0000102 return 0;
103}
104
105
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

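/**
 * Translate a guest pointer: look up the DMA buffer identified by the
 * pointer's gmrId handle, record a relocation for the pointer and make
 * sure the buffer is on the list of buffers to validate. On success a
 * reference to the buffer is returned in @vmw_bo_p.
 */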
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

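/**
 * Check a SURFACE_DMA command: translate the guest pointer, look up the
 * host surface and patch the command stream with the device surface id.
 */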
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/**
	 * Patch command stream with device SID.
	 */

	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */
	vmw_surface_unreference(&srf);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

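/**
 * Check a DRAW_PRIMITIVES command: bounds-check the vertex declaration
 * and primitive range arrays against the command size and verify every
 * referenced surface id.
 */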
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

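/**
 * Check a SETTEXTURESTATE command: verify the surface id of every
 * BIND_TEXTURE state in the command payload.
 */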
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}


typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

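/**
 * Dispatch table of per-command verifier functions, indexed by SVGA3D
 * command id relative to SVGA_3D_CMD_BASE.
 */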
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

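/**
 * Verify a single command in the stream and return its total size,
 * including the header, in @size.
 */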
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

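/**
 * Walk the submitted command stream and verify each command in turn
 * until the whole buffer has been consumed.
 */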
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf, uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

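/**
 * Patch each relocated guest pointer with the offset and GMR id of the
 * buffer object it refers to, now that all buffers have been validated.
 */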
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		reloc->location->offset += bo->offset;
		reloc->location->gmrId = vmw_dmabuf_gmr(bo);
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;

	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);
}

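/**
 * Make a single buffer object accessible to the device: first try to
 * place it in VRAM if there is space, then set up a GMR binding, and
 * finally fall back to VRAM with eviction.
 */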
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
		return 0;

	/**
	 * Put BO in VRAM, only if there is space.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
	if (unlikely(ret == -ERESTARTSYS))
		return ret;

	/**
	 * Otherwise, set it up as GMR.
	 */

	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
		return 0;

	ret = vmw_gmr_bind(dev_priv, bo);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}


static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

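/**
 * Execbuf ioctl: copy the user-space command stream into the FIFO,
 * verify and patch it, validate all referenced buffers, optionally
 * throttle on lag, then commit the commands and fence the submission.
 */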
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_cmd_mutex;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(cmd, user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_commit;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
				     dev_priv->val_seq++);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (arg->throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
				   arg->throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */

	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_commit:
	vmw_fifo_commit(dev_priv, 0);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}