/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
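        /*
         * Note: this relies on the GNU "?:" extension. For a
         * CAP_SYS_ADMIN caller the expression evaluates to capable()'s
         * nonzero result (1), otherwise to -EINVAL. Either way the value
         * is nonzero, so the caller in vmw_cmd_check() rejects the
         * command; only the returned error value differs.
         */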
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

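/*
 * vmw_resource_to_validate_list - add a resource to the validate list
 *
 * Reference ownership (an added note, derived from the code below): the
 * caller's reference on @*p_res is either transferred to the validate
 * list the first time the resource is seen in this batch, or dropped if
 * the resource is already listed or the per-batch limit is hit.
 */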
static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
                                         struct vmw_resource **p_res)
{
        int ret = 0;
        struct vmw_resource *res = *p_res;

        if (!res->on_validate_list) {
                if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
                        DRM_ERROR("Too many resources referenced in "
                                  "command stream.\n");
                        ret = -ENOMEM;
                        goto out;
                }
                sw_context->resources[sw_context->num_ref_resources++] = res;
                res->on_validate_list = true;
                return 0;
        }

out:
        vmw_resource_unreference(p_res);
        return ret;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @fence_flags: Fence flags to be or'ed with any other fence flags for
 * this buffer on this submission batch.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct ttm_buffer_object *bo,
                                   uint32_t fence_flags,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct ttm_validate_buffer *val_buf;

        val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);

        if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                DRM_ERROR("Max number of DMA buffers per submission"
                          " exceeded.\n");
                return -EINVAL;
        }

        val_buf = &sw_context->val_bufs[val_node];
        if (unlikely(val_node == sw_context->cur_val_buf)) {
                val_buf->new_sync_obj_arg = NULL;
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->usage = TTM_USAGE_READWRITE;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                ++sw_context->cur_val_buf;
        }

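        /*
         * The fence flags are accumulated as bit flags in the low bits of
         * the otherwise unused new_sync_obj_arg pointer value, so a buffer
         * referenced several times in one batch picks up the union of all
         * requested flags.
         */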
        val_buf->new_sync_obj_arg = (void *)
                ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
        sw_context->fence_flags |= fence_flags;

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_resource *ctx;

        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                __le32 cid;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
                return 0;

        ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
                                &ctx);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use context %u\n",
                          (unsigned) cmd->cid);
                return ret;
        }

        sw_context->last_cid = cmd->cid;
        sw_context->cid_valid = true;
        sw_context->cur_ctx = ctx;
        return vmw_resource_to_validate_list(sw_context, &ctx);
}

static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             uint32_t *sid)
{
        struct vmw_surface *srf;
        int ret;
        struct vmw_resource *res;

        if (*sid == SVGA3D_INVALID_ID)
                return 0;

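        /*
         * Fast path: this surface id was already translated earlier in
         * the command stream; reuse the cached device id.
         */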
        if (likely((sw_context->sid_valid &&
                    *sid == sw_context->last_sid))) {
                *sid = sw_context->sid_translation;
                return 0;
        }

        ret = vmw_user_surface_lookup_handle(dev_priv,
                                             sw_context->tfile,
                                             *sid, &srf);
        if (unlikely(ret != 0)) {
169 DRM_ERROR("Could ot find or use surface 0x%08x "
170 "address 0x%08lx\n",
171 (unsigned int) *sid,
172 (unsigned long) sid);
173 return ret;
174 }

        sw_context->last_sid = *sid;
        sw_context->sid_valid = true;
        sw_context->sid_translation = srf->res.id;
        *sid = sw_context->sid_translation;

        res = &srf->res;
        return vmw_resource_to_validate_list(sw_context, &res);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
        return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
                return -EPERM;
        }

        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
                return -EPERM;
        }

        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context for the next query.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch. It also checks whether we're using a new query context.
 * In that case, it makes sure we emit a query barrier for the old
 * context before the current query buffer is fenced.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       uint32_t cid,
                                       struct ttm_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        int ret;
        bool add_cid = false;
        uint32_t cid_to_add;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        BUG_ON(!sw_context->query_cid_valid);
                        add_cid = true;
                        cid_to_add = sw_context->cur_query_cid;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      DRM_VMW_FENCE_FLAG_EXEC,
                                                      NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              DRM_VMW_FENCE_FLAG_EXEC,
                                              NULL);
                if (unlikely(ret != 0))
                        return ret;

        }

        if (unlikely(cid != sw_context->cur_query_cid &&
                     sw_context->query_cid_valid)) {
                add_cid = true;
                cid_to_add = sw_context->cur_query_cid;
        }

        sw_context->cur_query_cid = cid;
        sw_context->query_cid_valid = true;

        if (add_cid) {
                struct vmw_resource *ctx = sw_context->cur_ctx;

                if (list_empty(&ctx->query_head))
                        list_add_tail(&ctx->query_head,
                                      &sw_context->query_list);
                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              DRM_VMW_FENCE_FLAG_EXEC,
                                              NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * if no other query waits are issued in this command submission batch,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_resource *ctx, *next_ctx;
        int ret;

        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
                                 query_head) {
                list_del_init(&ctx->query_head);

                BUG_ON(!ctx->on_validate_list);

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin(dev_priv->pinned_bo, false);
                        ttm_bo_unref(&dev_priv->pinned_bo);
                }

                vmw_bo_pin(sw_context->cur_query_bo, true);

                /*
                 * We also pin the dummy_query_bo buffer so that we
                 * don't need to validate it when emitting
                 * dummy queries in context destroy paths.
                 */

                vmw_bo_pin(dev_priv->dummy_query_bo, true);
                dev_priv->dummy_query_bo_pinned = true;

                dev_priv->query_cid = sw_context->cur_query_cid;
                dev_priv->pinned_bo =
                        ttm_bo_reference(sw_context->cur_query_bo);
        }
}

/**
 * vmw_query_switch_backoff - clear query barrier list
 * @sw_context: The sw context used for this submission batch.
 *
 * This function is used as part of an error path, where a previously
 * set up list of query barriers needs to be cleared.
 */
static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
{
        struct list_head *list, *next;

        list_for_each_safe(list, next, &sw_context->query_list) {
                list_del_init(list);
        }
}

static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
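        /*
         * A guest pointer names a GMR id plus an offset. Here the id
         * doubles as a user-space DMA buffer handle; the real GMR id (or
         * the framebuffer GMR) is patched in later by
         * vmw_apply_relocations().
         */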
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
                                      &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
                                          &vmw_bo->base, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;
        struct vmw_resource *ctx;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);

        /*
         * This wait will act as a barrier for previous waits for this
         * context.
         */

        ctx = sw_context->cur_ctx;
        if (!list_empty(&ctx->query_head))
                list_del_init(&ctx->query_head);

        return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;
        struct vmw_resource *res;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        bo = &vmw_bo->base;
        ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
                                             cmd->dma.host.sid, &srf);
        if (ret) {
                DRM_ERROR("could not find surface\n");
                goto out_no_reloc;
        }

        /*
         * Patch command stream with device SID.
         */
        cmd->dma.host.sid = srf->res.id;
        vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);

        vmw_dmabuf_unreference(&vmw_bo);

        res = &srf->res;
        return vmw_resource_to_validate_list(sw_context, &res);

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        struct vmw_draw_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDrawPrimitives body;
        } *cmd;
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_draw_cmd, header);
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                DRM_ERROR("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &decl->array.surfaceId);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                DRM_ERROR("Illegal number of index ranges.\n");
                return -EINVAL;
        }

        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &range->indexArray.surfaceId);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
        };

        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
                ((unsigned long) header + header->size + sizeof(header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &cur_state->value);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      void *buf)
{
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd = buf;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->body.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);

        return ret;
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                void *buf, uint32_t *size)
{
        uint32_t size_remaining = *size;
        uint32_t cmd_id;

        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
        switch (cmd_id) {
        case SVGA_CMD_UPDATE:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
                break;
        case SVGA_CMD_DEFINE_GMRFB:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
                break;
        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
                break;
        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
                break;
        default:
                DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
                return -EINVAL;
        }

        if (*size > size_remaining) {
                DRM_ERROR("Invalid SVGA command (size mismatch):"
                          " %u.\n", cmd_id);
                return -EINVAL;
        }

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
                return -EPERM;
        }

        if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
                return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

        return 0;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
                             struct vmw_sw_context *,
                             SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
        [cmd - SVGA_3D_CMD_BASE] = func
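
/*
 * VMW_CMD_DEF uses a C99 designated array initializer, so an entry like
 * VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid) expands to
 * [SVGA_3D_CMD_SURFACE_DEFINE - SVGA_3D_CMD_BASE] = &vmw_cmd_invalid,
 * letting vmw_cmd_check() index the table below directly by
 * (command id - SVGA_3D_CMD_BASE).
 */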

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
                    &vmw_cmd_set_render_target_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                    &vmw_cmd_blt_surf_screen_check)
};
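
/*
 * Extending the verifier amounts to writing another vmw_cmd_func and
 * adding a VMW_CMD_DEF() entry above. A minimal sketch (the command name
 * here is illustrative only, not an id defined in this driver):
 *
 *      static int vmw_cmd_example_check(struct vmw_private *dev_priv,
 *                                       struct vmw_sw_context *sw_context,
 *                                       SVGA3dCmdHeader *header)
 *      {
 *              return vmw_cmd_cid_check(dev_priv, sw_context, header);
 *      }
 *
 *      VMW_CMD_DEF(SVGA_3D_CMD_EXAMPLE, &vmw_cmd_example_check),
 */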

static int vmw_cmd_check(struct vmw_private *dev_priv,
                         struct vmw_sw_context *sw_context,
                         void *buf, uint32_t *size)
{
        uint32_t cmd_id;
        uint32_t size_remaining = *size;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;

        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
        /* Handle any non-3D commands */
        if (unlikely(cmd_id < SVGA_CMD_MAX))
                return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

        cmd_id = le32_to_cpu(header->id);
        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

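        /*
         * For 3D commands, header->size counts only the command body, so
         * the total stream space consumed is the body size plus the
         * 8-byte SVGA3dCmdHeader (32-bit id + 32-bit size).
         */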
        cmd_id -= SVGA_3D_CMD_BASE;
        if (unlikely(*size > size_remaining))
                goto out_err;

        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
                goto out_err;

        ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                goto out_err;

        return 0;
out_err:
        DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             void *buf,
                             uint32_t size)
{
        int32_t cur_size = size;
        int ret;

        while (cur_size > 0) {
                size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
                if (unlikely(ret != 0))
                        return ret;
                buf = (void *)((unsigned long) buf + size);
                cur_size -= size;
        }

        if (unlikely(cur_size != 0)) {
                DRM_ERROR("Command verifier out of sync.\n");
                return -EINVAL;
        }

        return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
        sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
        uint32_t i;
        struct vmw_relocation *reloc;
        struct ttm_validate_buffer *validate;
        struct ttm_buffer_object *bo;

        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
                validate = &sw_context->val_bufs[reloc->index];
                bo = validate->bo;
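                /*
                 * Buffers that ended up in VRAM are addressed through the
                 * special framebuffer GMR id plus a byte offset; buffers
                 * that stayed in guest memory get their real GMR id.
                 */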
                if (bo->mem.mem_type == TTM_PL_VRAM) {
                        reloc->location->offset += bo->offset;
                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
                } else
                        reloc->location->gmrId = bo->mem.start;
        }
        vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry, *next;
        uint32_t i = sw_context->num_ref_resources;

        /*
         * Drop references to DMA buffers held during command submission.
         */
        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
                                 head) {
                list_del(&entry->head);
                vmw_dmabuf_validate_clear(entry->bo);
                ttm_bo_unref(&entry->bo);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);

        /*
         * Drop references to resources held during command submission.
         */
        while (i-- > 0) {
                sw_context->resources[i]->on_validate_list = false;
                vmw_resource_unreference(&sw_context->resources[i]);
        }
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo)
{
        int ret;

        /*
         * Don't validate pinned buffers.
         */

        if (bo == dev_priv->pinned_bo ||
            (bo == dev_priv->dummy_query_bo &&
             dev_priv->dummy_query_bo_pinned))
                return 0;

        /*
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
         * start evicting GMRs to make room. If the DMA buffer can't be
         * used as a GMR, this will return -ENOMEM.
         */

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;

        /*
         * If that failed, try VRAM again, this time evicting
         * previous contents.
         */

        DRM_INFO("Falling through to VRAM.\n");
        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
        return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry;
        int ret;

        list_for_each_entry(entry, &sw_context->validate_nodes, head) {
                ret = vmw_validate_single_buffer(dev_priv, entry->bo);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
                                 uint32_t size)
{
        if (likely(sw_context->cmd_bounce_size >= size))
                return 0;

        if (sw_context->cmd_bounce_size == 0)
                sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

        while (sw_context->cmd_bounce_size < size) {
                sw_context->cmd_bounce_size =
                        PAGE_ALIGN(sw_context->cmd_bounce_size +
                                   (sw_context->cmd_bounce_size >> 1));
        }

        if (sw_context->cmd_bounce != NULL)
                vfree(sw_context->cmd_bounce);

        sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

        if (sw_context->cmd_bounce == NULL) {
                DRM_ERROR("Failed to allocate command bounce buffer.\n");
                sw_context->cmd_bounce_size = 0;
                return -ENOMEM;
        }

        return 0;
}
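
/*
 * A worked example of the growth loop above, assuming
 * VMWGFX_CMD_BOUNCE_INIT_SIZE is the usual 32 KiB and PAGE_SIZE is 4 KiB:
 * a 100 KiB batch grows the target size 32 -> 48 -> 72 -> 108 KiB
 * (each step is 1.5x, rounded up to a page) before the single vmalloc()
 * after the loop.
 */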

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * userspace handle is created; otherwise no handle is created.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                               struct vmw_private *dev_priv,
                               struct vmw_fence_obj **p_fence,
                               uint32_t *p_handle)
{
        uint32_t sequence;
        int ret;
        bool synced = false;

        /* p_handle implies file_priv. */
        BUG_ON(p_handle != NULL && file_priv == NULL);

        ret = vmw_fifo_send_fence(dev_priv, &sequence);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Fence submission error. Syncing.\n");
                synced = true;
        }

        if (p_handle != NULL)
                ret = vmw_user_fence_create(file_priv, dev_priv->fman,
                                            sequence,
                                            DRM_VMW_FENCE_FLAG_EXEC,
                                            p_fence, p_handle);
        else
                ret = vmw_fence_create(dev_priv->fman, sequence,
                                       DRM_VMW_FENCE_FLAG_EXEC,
                                       p_fence);

        if (unlikely(ret != 0 && !synced)) {
                (void) vmw_fallback_wait(dev_priv, false, false,
                                         sequence, false,
                                         VMW_FENCE_WAIT_TIMEOUT);
                *p_fence = NULL;
        }

        return 0;
}

int vmw_execbuf_process(struct drm_file *file_priv,
                        struct vmw_private *dev_priv,
                        void __user *user_commands,
                        void *kernel_commands,
                        uint32_t command_size,
                        uint64_t throttle_us,
                        struct drm_vmw_fence_rep __user *user_fence_rep)
{
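        /*
         * Overview of the submission pipeline implemented below:
         * bounce-copy user commands (unless already in kernel memory),
         * verify and patch the stream, reserve and validate all referenced
         * buffers, apply relocations, optionally throttle, copy the stream
         * into the fifo, commit the query-buffer switch, and finally fence
         * the batch and report the fence back to user space.
         */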
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct drm_vmw_fence_rep fence_rep;
        struct vmw_fence_obj *fence;
        uint32_t handle;
        void *cmd;
        int ret;

        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
        if (unlikely(ret != 0))
                return -ERESTARTSYS;

        if (kernel_commands == NULL) {
                sw_context->kernel = false;

                ret = vmw_resize_cmd_bounce(sw_context, command_size);
                if (unlikely(ret != 0))
                        goto out_unlock;

                ret = copy_from_user(sw_context->cmd_bounce,
                                     user_commands, command_size);

                if (unlikely(ret != 0)) {
                        ret = -EFAULT;
                        DRM_ERROR("Failed copying commands.\n");
                        goto out_unlock;
                }
                kernel_commands = sw_context->cmd_bounce;
        } else
                sw_context->kernel = true;

        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
        sw_context->cid_valid = false;
        sw_context->sid_valid = false;
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;
        sw_context->num_ref_resources = 0;
        sw_context->fence_flags = 0;
        INIT_LIST_HEAD(&sw_context->query_list);
        sw_context->cur_query_bo = dev_priv->pinned_bo;
        sw_context->cur_query_cid = dev_priv->query_cid;
        sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);

        INIT_LIST_HEAD(&sw_context->validate_nodes);

        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                command_size);
        if (unlikely(ret != 0))
                goto out_err;

        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
                goto out_err;

        vmw_apply_relocations(sw_context);

        if (throttle_us) {
                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
                                   throttle_us);

                if (unlikely(ret != 0))
                        goto out_throttle;
        }

        cmd = vmw_fifo_reserve(dev_priv, command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
                goto out_throttle;
        }

        memcpy(cmd, kernel_commands, command_size);
        vmw_fifo_commit(dev_priv, command_size);

        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
                                         &fence,
                                         (user_fence_rep) ? &handle : NULL);
        /*
         * This error is harmless, because if fence submission fails,
         * vmw_fifo_send_fence will sync. The error will be propagated to
         * user-space in @fence_rep
         */

        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");

        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
                                    (void *) fence);

        vmw_clear_validations(sw_context);

        if (user_fence_rep) {
                fence_rep.error = ret;
                fence_rep.handle = handle;
                fence_rep.seqno = fence->seqno;
                vmw_update_seqno(dev_priv, &dev_priv->fifo);
                fence_rep.passed_seqno = dev_priv->last_read_seqno;

                /*
                 * copy_to_user errors will be detected by user space not
                 * seeing fence_rep::error filled in. Typically
                 * user-space would have pre-set that member to -EFAULT.
                 */
                ret = copy_to_user(user_fence_rep, &fence_rep,
                                   sizeof(fence_rep));

                /*
                 * User-space lost the fence object. We need to sync
                 * and unreference the handle.
                 */
                if (unlikely(ret != 0) && (fence_rep.error == 0)) {
                        BUG_ON(fence == NULL);

                        ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                                  handle, TTM_REF_USAGE);
                        DRM_ERROR("Fence copy error. Syncing.\n");
                        (void) vmw_fence_obj_wait(fence,
                                                  fence->signal_mask,
                                                  false, false,
                                                  VMW_FENCE_WAIT_TIMEOUT);
                }
        }

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        return 0;

out_err:
        vmw_free_relocations(sw_context);
out_throttle:
        vmw_query_switch_backoff(sw_context);
        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
        vmw_clear_validations(sw_context);
out_unlock:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
        DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

        (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
        vmw_bo_pin(dev_priv->pinned_bo, false);
        vmw_bo_pin(dev_priv->dummy_query_bo, false);
        dev_priv->dummy_query_bo_pinned = false;
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @only_on_cid_match: Only flush and unpin if the current active query cid
 * matches @cid.
 * @cid: Optional context id to match.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                   bool only_on_cid_match, uint32_t cid)
{
        int ret = 0;
        struct list_head validate_list;
        struct ttm_validate_buffer pinned_val, query_val;
        struct vmw_fence_obj *fence;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        if (dev_priv->pinned_bo == NULL)
                goto out_unlock;

        if (only_on_cid_match && cid != dev_priv->query_cid)
                goto out_unlock;

        INIT_LIST_HEAD(&validate_list);

        pinned_val.new_sync_obj_arg = (void *)(unsigned long)
                DRM_VMW_FENCE_FLAG_EXEC;
        pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
        list_add_tail(&pinned_val.head, &validate_list);

        query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
        query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
        list_add_tail(&query_val.head, &validate_list);

        do {
                ret = ttm_eu_reserve_buffers(&validate_list);
        } while (ret == -ERESTARTSYS);

        if (unlikely(ret != 0)) {
                vmw_execbuf_unpin_panic(dev_priv);
                goto out_no_reserve;
        }

        ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
        if (unlikely(ret != 0)) {
                vmw_execbuf_unpin_panic(dev_priv);
                goto out_no_emit;
        }

        vmw_bo_pin(dev_priv->pinned_bo, false);
        vmw_bo_pin(dev_priv->dummy_query_bo, false);
        dev_priv->dummy_query_bo_pinned = false;

        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);

        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        return;

out_no_emit:
        ttm_eu_backoff_reservation(&validate_list);
out_no_reserve:
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        ttm_bo_unref(&dev_priv->pinned_bo);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * This will allow us to extend the ioctl argument while
         * maintaining backwards compatibility:
         * We take different code paths depending on the value of
         * arg->version.
         */

        if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
                DRM_ERROR("Incorrect execbuf version.\n");
                DRM_ERROR("You're running outdated experimental "
                          "vmwgfx user-space drivers.");
                return -EINVAL;
        }

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_execbuf_process(file_priv, dev_priv,
                                  (void __user *)(unsigned long)arg->commands,
                                  NULL, arg->command_size, arg->throttle_us,
                                  (void __user *)(unsigned long)arg->fence_rep);

        if (unlikely(ret != 0))
                goto out_unlock;

        vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}