/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset into the command buffer, in units of 4-byte entries,
 * where the id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back used to validate the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if and only if guest-backed objects are
 * available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}
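
/*
 * Illustrative note (not part of the original driver source): an entry
 * such as
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false)
 *
 * expands to the designated initializer
 *
 *	[SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE] =
 *		{&vmw_cmd_surface_copy_check, true, false, false},
 *
 * so the vmw_cmd_entries[] table defined later in this file can be
 * indexed directly with (cmd_id - SVGA_3D_CMD_BASE) while checking the
 * command stream.
 */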

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			vmw_context_binding_state_transfer
				(val->res, val->staged_bindings);
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}


/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: If non-NULL on entry, points on successful return to a valid
 * pointer to a struct vmw_resource_val_node.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head)
		cb[rel->offset] = rel->res->id;
}
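
/*
 * Example of the relocation flow, for illustration only (simplified from
 * the callers in this file): while parsing, the checker records where a
 * device resource id will eventually go,
 *
 *	ret = vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *					  id - sw_context->buf_start);
 *
 * and once all referenced resources are guaranteed to have valid device
 * ids, vmw_resource_relocations_apply() patches each recorded 4-byte
 * entry:
 *
 *	cb[rel->offset] = rel->res->id;
 */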

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: If non-NULL, updated on successful return to point to the
 * resource validation node.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     enum vmw_res_type res_type,
			     const struct vmw_user_resource_conv *converter,
			     uint32_t *id,
			     struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->tfile,
					      *id,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;

	if (node->first_usage && res_type == vmw_res_context) {
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = -ENOMEM;
			goto out_no_reloc;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We also pin the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}
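
/*
 * Sketch of the intended calling sequence, for illustration only (the
 * submission path, vmw_execbuf_process(), is outside this excerpt):
 *
 *	1. vmw_query_bo_switch_prepare() is called while checking query
 *	   commands in the batch;
 *	2. the command batch is validated and submitted;
 *	3. vmw_query_bo_switch_commit() is called;
 *	4. a fence sequence is emitted, fencing both the new and the old
 *	   pinned query buffer as described above.
 */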

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
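
/*
 * Note for illustration: the reloc->mob_loc pointer saved above is what
 * the later patching pass (vmw_apply_relocations(), outside this
 * excerpt) uses to overwrite the user-space handle with the validated
 * buffer's MOB id, mirroring how vmw_resource_relocations_apply()
 * patches resource ids.
 */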

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

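		/*
		 * The legacy and guest-backed query command structs are
		 * the same size (asserted below), so the command can be
		 * rewritten in place and re-dispatched through the
		 * guest-backed handler.
		 */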
		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}


static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}
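
/*
 * Note for illustration: the new_backup pointer staged here is consumed
 * at unreserve time by vmw_resource_list_unreserve() near the top of
 * this file, which hands it to vmw_resource_unreserve() together with
 * new_backup_offset, or drops it if the submission backed off.
 */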

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;
		struct vmw_resource_val_node *res_node;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_shader;
		bi.i1.shader_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}
1528
/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        struct vmw_bind_gb_shader_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;

        cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
                           header);

        return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
                                     user_shader_converter,
                                     &cmd->body.shid, &cmd->body.mobid,
                                     cmd->body.offsetInBytes);
}

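/**
 * vmw_cmd_check_not_3d - Validate a non-3D (2D) SVGA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the command stream;
 * on output, the size of this command, so the caller can advance past it.
 *
 * Only a small set of 2D commands is recognized, and all of them are
 * restricted to kernel-mode submitters.
 */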
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                void *buf, uint32_t *size)
{
        uint32_t size_remaining = *size;
        uint32_t cmd_id;

        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
        switch (cmd_id) {
        case SVGA_CMD_UPDATE:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
                break;
        case SVGA_CMD_DEFINE_GMRFB:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
                break;
        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
                break;
        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
                break;
        default:
                DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
                return -EINVAL;
        }

        if (*size > size_remaining) {
                DRM_ERROR("Invalid SVGA command (size mismatch):"
                          " %u.\n", cmd_id);
                return -EINVAL;
        }

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
                return -EPERM;
        }

        if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
                return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

        return 0;
}

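/*
 * Dispatch table indexed by SVGA 3D command id. Each VMW_CMD_DEF() entry
 * pairs a command with its validation function. Judging by how the entries
 * are consumed in vmw_cmd_check() below, the three trailing booleans mean,
 * in order: command allowed from user-space, command disallowed when
 * guest-backed objects are in use, and command requiring guest-backed
 * object support.
 */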
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
                    &vmw_cmd_set_render_target_check, true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
                    true, true, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
                    true, true, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
                    true, true, false),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
                    true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                    &vmw_cmd_blt_surf_screen_check, false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
                    &vmw_cmd_update_gb_surface, true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
                    &vmw_cmd_readback_gb_image, true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
                    &vmw_cmd_readback_gb_surface, true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                    &vmw_cmd_invalidate_gb_image, true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
                    &vmw_cmd_invalidate_gb_surface, true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
                    true, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
                    false, false, true),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
                    true, false, true)
};

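/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command.
 * @size: On input, the number of bytes remaining in the command stream;
 * on output, the size of this command.
 *
 * 2D commands are handed off to vmw_cmd_check_not_3d(). 3D commands are
 * looked up in vmw_cmd_entries[] and rejected if they are unknown, too
 * large for the remaining stream, privileged, or unsuitable for the
 * device's guest-backed object capability; otherwise the per-command
 * validation function is called.
 */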
static int vmw_cmd_check(struct vmw_private *dev_priv,
                         struct vmw_sw_context *sw_context,
                         void *buf, uint32_t *size)
{
        uint32_t cmd_id;
        uint32_t size_remaining = *size;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;
        const struct vmw_cmd_entry *entry;
        bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
        /* Handle any non-3D commands */
        if (unlikely(cmd_id < SVGA_CMD_MAX))
                return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

        cmd_id = le32_to_cpu(header->id);
        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

        cmd_id -= SVGA_3D_CMD_BASE;
        if (unlikely(*size > size_remaining))
                goto out_invalid;

        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
                goto out_invalid;

        entry = &vmw_cmd_entries[cmd_id];
        if (unlikely(!entry->user_allow && !sw_context->kernel))
                goto out_privileged;

        if (unlikely(entry->gb_disable && gb))
                goto out_old;

        if (unlikely(entry->gb_enable && !gb))
                goto out_new;

        ret = entry->func(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                goto out_invalid;

        return 0;
out_invalid:
        DRM_ERROR("Invalid SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
out_privileged:
        DRM_ERROR("Privileged SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EPERM;
out_old:
        DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
out_new:
        DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
}

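/**
 * vmw_cmd_check_all - Validate every command in a command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 *
 * Walks the stream one command at a time, letting vmw_cmd_check()
 * determine each command's size. A stream that does not end exactly on
 * a command boundary is rejected.
 */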
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             void *buf,
                             uint32_t size)
{
        int32_t cur_size = size;
        int ret;

        sw_context->buf_start = buf;

        while (cur_size > 0) {
                size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
                if (unlikely(ret != 0))
                        return ret;
                buf = (void *)((unsigned long) buf + size);
                cur_size -= size;
        }

        if (unlikely(cur_size != 0)) {
                DRM_ERROR("Command verifier out of sync.\n");
                return -EINVAL;
        }

        return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
        sw_context->cur_reloc = 0;
}

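/**
 * vmw_apply_relocations - Patch buffer object locations into the
 * command stream
 *
 * @sw_context: The software context being used for this batch.
 *
 * For each recorded relocation, writes the validated buffer object's
 * final location (VRAM offset, GMR id or MOB id, depending on where the
 * buffer ended up) into the command stream, then frees the relocation
 * list.
 */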
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
        uint32_t i;
        struct vmw_relocation *reloc;
        struct ttm_validate_buffer *validate;
        struct ttm_buffer_object *bo;

        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
                validate = &sw_context->val_bufs[reloc->index].base;
                bo = validate->bo;
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        reloc->location->offset += bo->offset;
                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
                        break;
                case VMW_PL_GMR:
                        reloc->location->gmrId = bo->mem.start;
                        break;
                case VMW_PL_MOB:
                        *reloc->mob_loc = bo->mem.start;
                        break;
                default:
                        BUG();
                }
        }
        vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
        struct vmw_resource_val_node *val, *val_next;

        /*
         * Drop references to resources held during command submission.
         */

        list_for_each_entry_safe(val, val_next, list, head) {
                list_del_init(&val->head);
                vmw_resource_unreference(&val->res);
                if (unlikely(val->staged_bindings))
                        kfree(val->staged_bindings);
                kfree(val);
        }
}

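/**
 * vmw_clear_validations - Drop validation list references
 *
 * @sw_context: The software context being used for this batch.
 *
 * Unreferences the buffer objects on the validation list and removes
 * both buffer and resource entries from the software context's hash
 * table. The resources themselves are unreferenced later, outside the
 * cmdbuf mutex, by vmw_resource_list_unreference().
 */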
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
        struct vmw_validate_buffer *entry, *next;
        struct vmw_resource_val_node *val;

        /*
         * Drop references to DMA buffers held during command submission.
         */
        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
                                 base.head) {
                list_del(&entry->base.head);
                ttm_bo_unref(&entry->base.bo);
                (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);

        list_for_each_entry(val, &sw_context->resource_list, head)
                (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

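/**
 * vmw_validate_single_buffer - Validate one buffer object into an
 * allowed placement
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 *
 * Pinned buffers are left where they are. Otherwise the buffer is
 * validated into MOB placement if requested, or into VRAM-or-GMR with a
 * final VRAM-only fallback.
 */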
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo,
                                      bool validate_as_mob)
{
        int ret;

        /*
         * Don't validate pinned buffers.
         */

        if (bo == dev_priv->pinned_bo ||
            (bo == dev_priv->dummy_query_bo &&
             dev_priv->dummy_query_bo_pinned))
                return 0;

        if (validate_as_mob)
                return ttm_bo_validate(bo, &vmw_mob_placement, true, false);

        /*
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
         * start evicting GMRs to make room. If the DMA buffer can't be
         * used as a GMR, this will return -ENOMEM.
         */

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;

        /*
         * If that failed, try VRAM again, this time evicting
         * previous contents.
         */

        DRM_INFO("Falling through to VRAM.\n");
        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
        return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
{
        struct vmw_validate_buffer *entry;
        int ret;

        list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
                ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
                                                 entry->validate_as_mob);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

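/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context being used for this batch.
 * @size: Required size in bytes.
 *
 * Grows the bounce buffer geometrically (about 1.5x per step, page
 * aligned) so that repeated submissions don't reallocate on every small
 * size increase. Old contents are discarded; callers copy in a fresh
 * command stream afterwards.
 */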
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
                                 uint32_t size)
{
        if (likely(sw_context->cmd_bounce_size >= size))
                return 0;

        if (sw_context->cmd_bounce_size == 0)
                sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

        while (sw_context->cmd_bounce_size < size) {
                sw_context->cmd_bounce_size =
                        PAGE_ALIGN(sw_context->cmd_bounce_size +
                                   (sw_context->cmd_bounce_size >> 1));
        }

        if (sw_context->cmd_bounce != NULL)
                vfree(sw_context->cmd_bounce);

        sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

        if (sw_context->cmd_bounce == NULL) {
                DRM_ERROR("Failed to allocate command bounce buffer.\n");
                sw_context->cmd_bounce_size = 0;
                return -ENOMEM;
        }

        return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                               struct vmw_private *dev_priv,
                               struct vmw_fence_obj **p_fence,
                               uint32_t *p_handle)
{
        uint32_t sequence;
        int ret;
        bool synced = false;

        /* p_handle implies file_priv. */
        BUG_ON(p_handle != NULL && file_priv == NULL);

        ret = vmw_fifo_send_fence(dev_priv, &sequence);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Fence submission error. Syncing.\n");
                synced = true;
        }

        if (p_handle != NULL)
                ret = vmw_user_fence_create(file_priv, dev_priv->fman,
                                            sequence,
                                            DRM_VMW_FENCE_FLAG_EXEC,
                                            p_fence, p_handle);
        else
                ret = vmw_fence_create(dev_priv->fman, sequence,
                                       DRM_VMW_FENCE_FLAG_EXEC,
                                       p_fence);

        if (unlikely(ret != 0 && !synced)) {
                (void) vmw_fallback_wait(dev_priv, false, false,
                                         sequence, false,
                                         VMW_FENCE_WAIT_TIMEOUT);
                *p_fence = NULL;
        }

        return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                            struct vmw_fpriv *vmw_fp,
                            int ret,
                            struct drm_vmw_fence_rep __user *user_fence_rep,
                            struct vmw_fence_obj *fence,
                            uint32_t fence_handle)
{
        struct drm_vmw_fence_rep fence_rep;

        if (user_fence_rep == NULL)
                return;

        memset(&fence_rep, 0, sizeof(fence_rep));

        fence_rep.error = ret;
        if (ret == 0) {
                BUG_ON(fence == NULL);

                fence_rep.handle = fence_handle;
                fence_rep.seqno = fence->seqno;
                vmw_update_seqno(dev_priv, &dev_priv->fifo);
                fence_rep.passed_seqno = dev_priv->last_read_seqno;
        }

        /*
         * copy_to_user errors will be detected by user space not
         * seeing fence_rep::error filled in. Typically
         * user-space would have pre-set that member to -EFAULT.
         */
        ret = copy_to_user(user_fence_rep, &fence_rep,
                           sizeof(fence_rep));

        /*
         * User-space lost the fence object. We need to sync
         * and unreference the handle.
         */
        if (unlikely(ret != 0) && (fence_rep.error == 0)) {
                ttm_ref_object_base_unref(vmw_fp->tfile,
                                          fence_handle, TTM_REF_USAGE);
                DRM_ERROR("Fence copy error. Syncing.\n");
                (void) vmw_fence_obj_wait(fence, fence->signal_mask,
                                          false, false,
                                          VMW_FENCE_WAIT_TIMEOUT);
        }
}

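/**
 * vmw_execbuf_process - Validate and submit a command stream
 *
 * @file_priv: Pointer to the file struct of the calling client.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space address of the command stream, used when
 * @kernel_commands is NULL.
 * @kernel_commands: Optional kernel-side copy of the command stream. If
 * NULL, the commands are copied from @user_commands into the bounce
 * buffer.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: If non-zero, throttle command submission based on fifo
 * lag, in microseconds.
 * @user_fence_rep: Optional user-space address to which fence
 * information is copied.
 * @out_fence: If non-NULL, receives a reference to the fence created for
 * this submission; the caller is then responsible for unreferencing it.
 */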
int vmw_execbuf_process(struct drm_file *file_priv,
                        struct vmw_private *dev_priv,
                        void __user *user_commands,
                        void *kernel_commands,
                        uint32_t command_size,
                        uint64_t throttle_us,
                        struct drm_vmw_fence_rep __user *user_fence_rep,
                        struct vmw_fence_obj **out_fence)
{
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct vmw_fence_obj *fence = NULL;
        struct vmw_resource *error_resource;
        struct list_head resource_list;
        struct ww_acquire_ctx ticket;
        uint32_t handle;
        void *cmd;
        int ret;

        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
        if (unlikely(ret != 0))
                return -ERESTARTSYS;

        if (kernel_commands == NULL) {
                sw_context->kernel = false;

                ret = vmw_resize_cmd_bounce(sw_context, command_size);
                if (unlikely(ret != 0))
                        goto out_unlock;

                ret = copy_from_user(sw_context->cmd_bounce,
                                     user_commands, command_size);

                if (unlikely(ret != 0)) {
                        ret = -EFAULT;
                        DRM_ERROR("Failed copying commands.\n");
                        goto out_unlock;
                }
                kernel_commands = sw_context->cmd_bounce;
        } else
                sw_context->kernel = true;

        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;
        sw_context->fence_flags = 0;
        INIT_LIST_HEAD(&sw_context->resource_list);
        sw_context->cur_query_bo = dev_priv->pinned_bo;
        sw_context->last_query_ctx = NULL;
        sw_context->needs_post_query_barrier = false;
        memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
        INIT_LIST_HEAD(&sw_context->validate_nodes);
        INIT_LIST_HEAD(&sw_context->res_relocations);
        if (!sw_context->res_ht_initialized) {
                ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
                if (unlikely(ret != 0))
                        goto out_unlock;
                sw_context->res_ht_initialized = true;
        }

        INIT_LIST_HEAD(&resource_list);
        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                command_size);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_resources_reserve(sw_context);
        if (unlikely(ret != 0))
                goto out_err;

        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_resources_validate(sw_context);
        if (unlikely(ret != 0))
                goto out_err;

        if (throttle_us) {
                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
                                   throttle_us);

                if (unlikely(ret != 0))
                        goto out_err;
        }

        ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_err;
        }

        cmd = vmw_fifo_reserve(dev_priv, command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
                goto out_unlock_binding;
        }

        vmw_apply_relocations(sw_context);
        memcpy(cmd, kernel_commands, command_size);

        vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
        vmw_resource_relocations_free(&sw_context->res_relocations);

        vmw_fifo_commit(dev_priv, command_size);

        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
                                         &fence,
                                         (user_fence_rep) ? &handle : NULL);
        /*
         * This error is harmless, because if fence submission fails,
         * vmw_fifo_send_fence will sync. The error will be propagated to
         * user-space in @user_fence_rep.
         */

        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");

        vmw_resource_list_unreserve(&sw_context->resource_list, false);
        mutex_unlock(&dev_priv->binding_mutex);

        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);

        if (unlikely(dev_priv->pinned_bo != NULL &&
                     !dev_priv->query_cid_valid))
                __vmw_execbuf_release_pinned_bo(dev_priv, fence);

        vmw_clear_validations(sw_context);
        vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
                                    user_fence_rep, fence, handle);

        /* Don't unreference when handing fence out */
        if (unlikely(out_fence != NULL)) {
                *out_fence = fence;
                fence = NULL;
        } else if (likely(fence != NULL)) {
                vmw_fence_obj_unreference(&fence);
        }

        list_splice_init(&sw_context->resource_list, &resource_list);
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        /*
         * Unreference resources outside of the cmdbuf_mutex to
         * avoid deadlocks in resource destruction paths.
         */
        vmw_resource_list_unreference(&resource_list);

        return 0;

out_unlock_binding:
        mutex_unlock(&dev_priv->binding_mutex);
out_err:
        vmw_resource_relocations_free(&sw_context->res_relocations);
        vmw_free_relocations(sw_context);
        ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
        vmw_resource_list_unreserve(&sw_context->resource_list, true);
        vmw_clear_validations(sw_context);
        if (unlikely(dev_priv->pinned_bo != NULL &&
                     !dev_priv->query_cid_valid))
                __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
        list_splice_init(&sw_context->resource_list, &resource_list);
        error_resource = sw_context->error_resource;
        sw_context->error_resource = NULL;
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        /*
         * Unreference resources outside of the cmdbuf_mutex to
         * avoid deadlocks in resource destruction paths.
         */
        vmw_resource_list_unreference(&resource_list);
        if (unlikely(error_resource != NULL))
                vmw_resource_unreference(&error_resource);

        return ret;
}

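/*
 * A minimal sketch of kernel-internal use (illustrative only; the local
 * variable names cmd_buf and cmd_size are assumptions): submit an
 * already-built kernel command buffer and take ownership of the
 * resulting fence.
 *
 *      struct vmw_fence_obj *fence = NULL;
 *      int ret;
 *
 *      ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd_buf,
 *                                cmd_size, 0, NULL, &fence);
 *      if (ret == 0 && fence != NULL)
 *              vmw_fence_obj_unreference(&fence);
 */
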
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
        DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

        (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
        vmw_bo_pin(dev_priv->pinned_bo, false);
        vmw_bo_pin(dev_priv->dummy_query_bo, false);
        dev_priv->dummy_query_bo_pinned = false;
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                     struct vmw_fence_obj *fence)
{
        int ret = 0;
        struct list_head validate_list;
        struct ttm_validate_buffer pinned_val, query_val;
        struct vmw_fence_obj *lfence = NULL;
        struct ww_acquire_ctx ticket;

        if (dev_priv->pinned_bo == NULL)
                goto out_unlock;

        INIT_LIST_HEAD(&validate_list);

        pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
        list_add_tail(&pinned_val.head, &validate_list);

        query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
        list_add_tail(&query_val.head, &validate_list);

        do {
                ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
        } while (ret == -ERESTARTSYS);

        if (unlikely(ret != 0)) {
                vmw_execbuf_unpin_panic(dev_priv);
                goto out_no_reserve;
        }

        if (dev_priv->query_cid_valid) {
                BUG_ON(fence != NULL);
                ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
                if (unlikely(ret != 0)) {
                        vmw_execbuf_unpin_panic(dev_priv);
                        goto out_no_emit;
                }
                dev_priv->query_cid_valid = false;
        }

        vmw_bo_pin(dev_priv->pinned_bo, false);
        vmw_bo_pin(dev_priv->dummy_query_bo, false);
        dev_priv->dummy_query_bo_pinned = false;

        if (fence == NULL) {
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
                                                  NULL);
                fence = lfence;
        }
        ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
        if (lfence != NULL)
                vmw_fence_obj_unreference(&lfence);

        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
        return;

out_no_emit:
        ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        ttm_bo_unref(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
        mutex_lock(&dev_priv->cmdbuf_mutex);
        if (dev_priv->query_cid_valid)
                __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

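/**
 * vmw_execbuf_ioctl - DRM ioctl entry point for command submission
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_execbuf_arg.
 * @file_priv: Pointer to the file struct of the calling client.
 *
 * Checks the argument version, takes the master's ttm read lock, hands
 * the user-space command stream to vmw_execbuf_process() and finally
 * lets KMS update any cursor state affected by the submission.
 */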
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * This will allow us to extend the ioctl argument while
         * maintaining backwards compatibility:
         * We take different code paths depending on the value of
         * arg->version.
         */

        if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
                DRM_ERROR("Incorrect execbuf version.\n");
                DRM_ERROR("You're running outdated experimental "
                          "vmwgfx user-space drivers.\n");
                return -EINVAL;
        }

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_execbuf_process(file_priv, dev_priv,
                                  (void __user *)(unsigned long)arg->commands,
                                  NULL, arg->command_size, arg->throttle_us,
                                  (void __user *)(unsigned long)arg->fence_rep,
                                  NULL);

        if (unlikely(ret != 0))
                goto out_unlock;

        vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}