/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

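/* log2 of the number of buckets in the software context's resource hash table. */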
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in units of 4-byte entries, into the command buffer
 * where the id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: The resource does not need to allocate a backup
 * buffer on reservation; the command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back validating this command type.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

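/*
 * Build a designated initializer for the command dispatch table:
 * entries are indexed by SVGA 3D command id relative to SVGA_3D_CMD_BASE.
 */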
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_context_binding_state_transfer
					(val->res, val->staged_bindings);
			}
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state
 * re-emission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_binding *entry;
	int ret = 0;
	struct vmw_resource *res;

	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
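		/* Skip bindings whose resource is already on its way out. */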
		res = vmw_resource_reference_unless_doomed(entry->bi.res);
		if (unlikely(res == NULL))
			continue;

		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

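	/*
	 * Patch each recorded location with the resource's device id;
	 * a relocation whose resource has gone away is turned into a NOP.
	 */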
	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

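	/* Reuse an existing validate node if this bo was added before. */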
	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

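		/*
		 * A resource with a backup buffer also needs that buffer on
		 * the validate list; it is validated as a MOB when the
		 * resource type requires guest-backed storage.
		 */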
		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put
 * it on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: User-space resource id handle.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 enum vmw_res_type res_type,
			 const struct vmw_user_resource_conv *converter,
			 uint32_t id,
			 uint32_t *id_loc,
			 struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (id == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && id == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      id,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) id);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = id;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;

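	/*
	 * First reference of a context when guest-backed objects are in use:
	 * put the context's previously bound resources on the validation list
	 * and allocate a tracker for bindings staged during this batch.
	 */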
	if (dev_priv->has_mob && node->first_usage &&
	    res_type == vmw_res_context) {
		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
		if (unlikely(ret != 0))
			goto out_no_reloc;
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			goto out_no_reloc;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
					converter, *id_loc, id_loc, p_val);
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (likely(!val->staged_bindings))
			continue;

		ret = vmw_context_rebind_all(val->res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

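	/*
	 * With guest-backed contexts, stage the render-target binding so it
	 * can be transferred to the persistent binding tracker on unreserve.
	 */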
	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

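		/* Query results are small; reject oversized query buffers. */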
		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

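	/*
	 * Record where the mob id lives; the NULL location marks this
	 * as a MOB id relocation when the relocations are applied.
	 */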
	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
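		/*
		 * With guest-backed objects available, rewrite the legacy
		 * query command in place as its GB equivalent and re-check it.
		 */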
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

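	/*
	 * Snoop DMA transfers so cursor surface contents can be tracked
	 * for the host-side cursor.
	 */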
	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

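	/*
	 * Bound the declared vertex-declaration count by what actually fits
	 * in the command payload before walking the array.
	 */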
	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
1600{
1601 struct vmw_shader_define_cmd {
1602 SVGA3dCmdHeader header;
1603 SVGA3dCmdDefineShader body;
1604 } *cmd;
1605 int ret;
1606 size_t size;
1607
1608 cmd = container_of(header, struct vmw_shader_define_cmd,
1609 header);
1610
1611 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1612 user_context_converter, &cmd->body.cid,
1613 NULL);
1614 if (unlikely(ret != 0))
1615 return ret;
1616
1617 if (unlikely(!dev_priv->has_mob))
1618 return 0;
1619
1620 size = cmd->header.size - sizeof(cmd->body);
1621 ret = vmw_compat_shader_add(sw_context->fp->shman,
1622 cmd->body.shid, cmd + 1,
1623 cmd->body.type, size,
1624 sw_context->fp->tfile,
1625 &sw_context->staged_shaders);
1626 if (unlikely(ret != 0))
1627 return ret;
1628
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_compat_shader_remove(sw_context->fp->shman,
				       cmd->body.shid,
				       cmd->body.type,
				       &sw_context->staged_shaders);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;
		struct vmw_resource_val_node *res_node;
		u32 shid = cmd->body.shid;

		if (shid != SVGA3D_INVALID_ID)
			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
							cmd->body.type,
							&shid);

		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
					       vmw_res_shader,
					       user_shader_converter,
					       shid,
					       &cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_shader;
		bi.i1.shader_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

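/*
 * Per-command validation dispatch table. Judging by how vmw_cmd_check()
 * below consumes struct vmw_cmd_entry, the three booleans following each
 * handler are, in order: user_allow (the command may be submitted by an
 * unprivileged client), gb_disable (the command is disallowed when
 * guest-backed objects are in use) and gb_enable (the command requires
 * guest-backed object support in the virtual hardware).
 */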
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true)
};

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands. */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
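
/*
 * Note on the TTM_PL_VRAM case above: as far as we can tell,
 * SVGA_GMR_FRAMEBUFFER is the device's predefined GMR id aliasing the
 * VRAM aperture, so a buffer validated into VRAM is addressed as an
 * offset within that special GMR rather than through a real GMR id.
 */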

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		if (unlikely(val->staged_bindings))
			kfree(val->staged_bindings);
		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool validate_as_mob)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */

	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
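
/*
 * Worked example (illustrative values only): the bounce buffer grows by
 * roughly 1.5x per iteration, page-aligned. Assuming
 * VMWGFX_CMD_BOUNCE_INIT_SIZE were 32 KiB and PAGE_SIZE 4 KiB, a 100 KiB
 * command stream would step the size 32 KiB -> 48 KiB -> 72 KiB -> 108 KiB
 * before the single vmalloc() is done.
 */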

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and hand back a NULL
 * fence pointer. It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member should be left
 * untouched; since user-space typically preloads it with -EFAULT, the
 * failure can then be detected there.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
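
/*
 * A minimal user-space sketch of the protocol above (illustrative only,
 * not part of this file; it assumes libdrm's drmCommandWrite()): the
 * caller pre-seeds fence_rep.error with -EFAULT so that a lost
 * copy_to_user() is detectable.
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *
 *	fence_rep.error = -EFAULT;
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (fence_rep.error != 0)
 *		abort();	// no fence handle was received
 */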

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_shaders);

	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_err;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock_binding;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);

	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	mutex_unlock(&dev_priv->binding_mutex);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_compat_shaders_commit(sw_context->fp->shman,
				  &sw_context->staged_shaders);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_compat_shaders_revert(sw_context->fp->shman,
				  &sw_context->staged_shaders);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
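
/*
 * Illustrative user-space usage of the execbuf ioctl (a sketch, not part
 * of this file; the authoritative layout of struct drm_vmw_execbuf_arg
 * lives in the vmwgfx_drm.h uapi header, and libdrm's drmCommandWrite()
 * is assumed):
 *
 *	struct drm_vmw_execbuf_arg arg;
 *	struct drm_vmw_fence_rep fence_rep;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	fence_rep.error = -EFAULT;
 *	arg.commands = (unsigned long) cmd_buffer;
 *	arg.command_size = cmd_size;
 *	arg.throttle_us = 0;
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */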