/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs
 * fixup is located.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
        u32 switching_backup : 1;
        u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back handling the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}

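/*
 * Illustrative sketch, not a definitive excerpt: VMW_CMD_DEF is meant for
 * designated initializers in a dispatch table indexed by SVGA3D command id,
 * for example (table name and entry chosen as an example):
 *
 *      static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *              VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
 *                          false, false, false),
 *      };
 *
 * The verifier can then look up header->id - SVGA_3D_CMD_BASE in such a
 * table and invoke the entry's func callback.
 */
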
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}
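
/*
 * Usage sketch: the relocation code below records where a resource id
 * lives relative to the start of the command stream,
 *
 *      vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *                                  vmw_ptr_diff(sw_context->buf_start,
 *                                               id_loc));
 *
 * so that vmw_resource_relocations_apply() can later patch the id at the
 * same byte offset in a (possibly different) copy of the buffer.
 */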

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                                    bool backoff)
{
        struct vmw_resource_val_node *val;
        struct list_head *list = &sw_context->resource_list;

        if (sw_context->dx_query_mob && !backoff)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                bool switch_backup =
                        (backoff) ? false : val->switching_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_binding_state_commit
                                        (vmw_context_binding_state(val->res),
                                         val->staged_bindings);
                        }

                        if (val->staged_bindings != sw_context->staged_bindings)
                                vmw_binding_state_free(val->staged_bindings);
                        else
                                sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource_val_node *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged_bindings);
                        node->staged_bindings = NULL;
                        goto out_err;
                }
        } else {
                node->staged_bindings = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        return 0;
out_err:
        return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
        if (unlikely(p_node != NULL))
                *p_node = node;

        if (!dev_priv->has_mob) {
                list_add_tail(&node->head, &sw_context->resource_list);
                return 0;
        }

        switch (vmw_res_type(res)) {
        case vmw_res_context:
        case vmw_res_dx_context:
                list_add(&node->head, &sw_context->ctx_resource_list);
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
                break;
        case vmw_res_cotable:
                list_add_tail(&node->head, &sw_context->ctx_resource_list);
                break;
        default:
                list_add_tail(&node->head, &sw_context->resource_list);
                break;
        }

        return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
        if (ret)
                return ret;

        return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                               enum vmw_view_type view_type, u32 id)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return PTR_ERR(view);

        ret = vmw_view_res_val_add(sw_context, view);
        vmw_resource_unreference(&view);

        return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a
 * context on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state
 * reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_resource_val_add(sw_context, res, NULL);
                        vmw_resource_unreference(&res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                /* entry->res is not refcounted */
                res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;

                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_resource_val_add(sw_context, entry->res,
                                                   NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_dma_buffer *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      dx_query_mob,
                                                      true, NULL);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);

                if (likely(rel->res != NULL))
                        *addr = rel->res->id;
                else
                        *addr = SVGA_3D_CMD_NOP;
        }
}
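
/*
 * Relocation lifecycle sketch (illustrative, mirroring the helpers above):
 * while the command stream is parsed, each location holding a resource id
 * is recorded with vmw_resource_relocation_add(); once all resource ids
 * are known, vmw_resource_relocations_apply() patches them into the buffer
 * that is handed to the device, and vmw_resource_relocations_free()
 * releases the list entries.
 */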

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) vbo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(&vbo->base);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret = 0;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        if (sw_context->dx_query_mob) {
                struct vmw_dma_buffer *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *backup = res->backup;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (ret) {
                                ttm_bo_unreserve(&vbo->base);
                                return ret;
                        }
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        if (p_val)
                *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc));
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         vmw_ptr_diff(sw_context->buf_start, id_loc));
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_dma_buffer *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_binding_rebind_all
                        (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->res);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_cmdbuf_res_manager *man;
        u32 i;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_lookup(man, view_type, view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }

                        ret = vmw_view_res_val_add(sw_context, view);
                        if (ret) {
                                DRM_ERROR("Could not add view to "
                                          "validation list.\n");
                                vmw_resource_unreference(&view);
                                return ret;
                        }
                }
                binding.bi.ctx = ctx_node->res;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
                                shader_slot, binding.slot);
                if (view)
                        vmw_resource_unreference(&view);
        }

        return 0;
}
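
/*
 * Caller sketch (illustrative, with hypothetical parameter values): a DX
 * command handler whose body carries a trailing array of view ids would
 * hand that array to vmw_view_bindings_add(), along the lines of
 *
 *      ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *                                  vmw_ctx_binding_sr, shader_slot,
 *                                  (uint32 *) &cmd[1], num_views,
 *                                  first_slot);
 *
 * where the view type, binding type, slots and view count are dictated by
 * the specific command being verified.
 */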

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;

                binding.bi.ctx = ctx_node->res;
                binding.bi.res = res_node ? res_node->res : NULL;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(ctx_node->staged_bindings,
                                &binding.bi, 0, binding.slot);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and if so,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_dmabuf_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}
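
/*
 * Sketch of what happens later (see vmw_apply_relocations(), outside this
 * excerpt): once the backing buffer has been validated and placed, a
 * relocation recorded with a non-NULL mob_loc is resolved roughly as
 *
 *      *reloc->mob_loc = bo->mem.start;
 *
 * for MOB-placed buffers, while relocations carrying a non-NULL
 * reloc->location instead patch an SVGAGuestPtr (see
 * vmw_translate_guest_ptr() below).
 */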

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}
1343
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07001344
1345
1346/**
1347 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1348 *
1349 * @dev_priv: Pointer to a device private struct.
1350 * @sw_context: The software context used for this command submission.
1351 * @header: Pointer to the command header in the command stream.
1352 *
1353 * This function adds the new query into the query COTABLE
1354 */
1355static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1356 struct vmw_sw_context *sw_context,
1357 SVGA3dCmdHeader *header)
1358{
1359 struct vmw_dx_define_query_cmd {
1360 SVGA3dCmdHeader header;
1361 SVGA3dCmdDXDefineQuery q;
1362 } *cmd;
1363
1364 int ret;
1365 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1366 struct vmw_resource *cotable_res;
1367
1368
1369 if (ctx_node == NULL) {
1370 DRM_ERROR("DX Context not set for query.\n");
1371 return -EINVAL;
1372 }
1373
1374 cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1375
1376 if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
1377 cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1378 return -EINVAL;
1379
1380 cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1381 ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1382 vmw_resource_unreference(&cotable_res);
1383
1384 return ret;
1385}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dx_bind_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindQuery q;
        } *cmd;
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

        /*
         * Look up the buffer pointed to by q.mobid, put it on the relocation
         * list so its kernel mode MOB ID can be filled in later.
         */
        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
                                    &vmw_bo);
        if (ret != 0)
                return ret;

        sw_context->dx_query_mob = vmw_bo;
        sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

        vmw_dmabuf_unreference(&vmw_bo);

        return ret;
}
1433
1434
1435
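/*
 * Illustrative sketch, not part of the original file: what the mobid
 * relocation buys us. User space encodes its buffer handle in q.mobid;
 * by the time the command reaches the device, the handle has been
 * replaced with the device MOB id of the validated buffer. A
 * hypothetical user-side encoding (fields other than q.mobid omitted):
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdDXBindQuery q;
 *	} cmd;
 *
 *	cmd.header.id = SVGA_3D_CMD_DX_BIND_QUERY;
 *	cmd.header.size = sizeof(cmd.q);
 *	cmd.q.mobid = buffer_handle;	(patched by the kernel at submit)
 */
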
/**
 * vmw_cmd_begin_gb_query - Validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - Validate an SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

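/*
 * Illustrative note, not part of the original file: on guest-backed
 * (has_mob) devices the legacy query command is rewritten in place to its
 * GB equivalent before validation, as done above. The technique is only
 * safe because both encodings occupy the same number of bytes in the
 * command stream, which the BUG_ON asserts:
 *
 *	struct { SVGA3dCmdHeader header; SVGA3dCmdBeginGBQuery q; } gb_cmd;
 *
 *	BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
 *	gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
 *	...
 *	memcpy(cmd, &gb_cmd, sizeof(*cmd));
 *
 * The same pattern repeats for END_QUERY and WAIT_FOR_QUERY below.
 */
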
/**
 * vmw_cmd_end_gb_query - Validate an SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - Validate an SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - Validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - Validate an SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

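/*
 * Illustrative sketch, not part of the original file: where the DMA suffix
 * lives. A SURFACE_DMA command is followed by a variable number of copy
 * boxes, and the suffix is the last sizeof(SVGA3dCmdSurfaceDMASuffix)
 * bytes of the command body, which is why it is located from the end:
 *
 *	| SVGA3dCmdHeader | SVGA3dCmdSurfaceDMA | boxes ... | suffix |
 *	                  |<------------- header->size ------------>|
 *
 * suffix->maximumOffset is clamped above so that no copy box can address
 * guest memory beyond the validated buffer object.
 */
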
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

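/*
 * Illustrative note, not part of the original file: bindings discovered
 * while parsing (such as the texture binding above) are not applied to
 * the context resource directly. They are collected in
 * ctx_node->staged_bindings and presumably committed to the context's
 * tracked state only once the whole command batch has validated
 * successfully, so a rejected batch leaves the context state untouched.
 */
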
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}


/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It looks up the
 * buffer pointed out by @buf_id, flags the resource's validation node as
 * switching backup buffers and records the new buffer and offset there.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

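/*
 * Illustrative note, not part of the original file: the switch recorded
 * above is deferred. Nothing is sent to the device here; the new buffer
 * and offset sit in the validation node until the batch has validated,
 * and are presumably applied when the resource is unreserved after
 * command submission, so a rejected batch never rebinds a backup buffer.
 */
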
/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id));
}

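/*
 * Illustrative note, not part of the original file: passing a NULL
 * resource to vmw_resource_relocation_add(), as done above, records the
 * offset of the command id itself rather than of a resource id. When the
 * relocations are applied, such an entry is presumably used to patch the
 * whole command out (turn it into a no-op): a shader that was absorbed
 * into the per-context shader manager must not also reach the device as
 * a DEFINE or DESTROY command.
 */
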
/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id));
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

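/*
 * Illustrative note, not part of the original file: how variable-length
 * DX commands are sized. The header carries the body size in bytes, so
 * the number of trailing ids is derived rather than trusted from user
 * space:
 *
 *	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(id_type);
 *
 * and the (u64) casts in the range check above keep a hostile
 * startView + num pair from overflowing 32 bits before the comparison.
 */
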
/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validate an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

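/*
 * Illustrative note, not part of the original file: the anonymous struct
 * above only declares the common prefix (defined_id, sid) shared by all
 * DX view define commands (shader resource, render target and depth
 * stencil views), so one validator serves them all.
 * vmw_view_cmd_to_type() recovers the concrete view type from the
 * command id, and the full command body (header->size bytes plus the
 * header) is handed to vmw_view_add(), presumably so the define can be
 * replayed later from kernel-owned memory.
 */
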
/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, make sure it's validated (present in the device) so that
 * the remove command will not confuse the device.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * Add view to the validate list iff it was not created using this
	 * command batch.
	 */
	return vmw_view_res_val_add(sw_context, view);
}

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXGenMips body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate an
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXTransferFromBuffer body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.destSid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
			  cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
};

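/*
 * Example (reading an entry in the table above): the entry
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *		    true, false, false)
 *
 * registers vmw_cmd_dma() as the verifier for SVGA_3D_CMD_SURFACE_DMA.
 * Matching the flag order used by vmw_cmd_check() below, the booleans
 * are user_allow (user-space batches may contain the command),
 * gb_disable (rejected on guest-backed-object devices) and gb_enable
 * (rejected on devices without guest-backed objects).
 */
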
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

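/*
 * Example (illustrative sketch, not driver code): the smallest 3D command
 * that passes vmw_cmd_check(). A batch is a sequence of such headers, each
 * followed by header.size bytes of payload:
 *
 *	SVGA3dCmdHeader *header = buf;
 *
 *	header->id = SVGA_3D_CMD_NOP;	(dispatches to vmw_cmd_ok)
 *	header->size = 0;		(payload bytes after the header)
 *
 * vmw_cmd_check() computes *size = header->size + sizeof(SVGA3dCmdHeader),
 * and vmw_cmd_check_all() below advances by that amount to the next
 * command in the batch.
 */
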
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context owning any staged bindings.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

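/*
 * Example (worked numbers, assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB
 * and PAGE_SIZE is 4 KiB): a first batch of 100000 bytes grows the bounce
 * buffer 32768 -> 49152 -> 73728 -> 110592 bytes, i.e. roughly 1.5x per
 * step with each intermediate size page-aligned, so batches that grow
 * slowly over repeated submissions trigger only O(log(size)) reallocations.
 */
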
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * userspace handle is created in addition to the fence object.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

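/*
 * Example (illustrative user-space sketch, assuming the libdrm command
 * wrapper): pre-seeding drm_vmw_fence_rep::error with -EFAULT so a failed
 * copy_to_user() above is detectable, as the comment in the function
 * recommends.
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *
 *	fence_rep.error = -EFAULT;
 *	arg.fence_rep = (uintptr_t)&fence_rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (fence_rep.error == 0)
 *		... fence_rep.handle and fence_rep.seqno are valid ...
 */
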
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel pointer to the commands, or NULL if they should
 * be copied from @user_commands.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS casted to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

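/*
 * Example (the caller pattern the contract above implies): as used in
 * vmw_execbuf_process() below,
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		return PTR_ERR(kernel_commands);
 *
 * after which header == NULL selects the fifo submission path and
 * header != NULL selects the command buffer manager path.
 */
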
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

4198/**
Thomas Hellstromc0951b72012-11-20 12:19:35 +00004199 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02004200 * query bo.
4201 *
4202 * @dev_priv: The device private structure.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00004203 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4204 * _after_ a query barrier that flushes all queries touching the current
4205 * buffer pointed to by @dev_priv->pinned_bo
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02004206 *
4207 * This function should be used to unpin the pinned query bo, or
4208 * as a query barrier when we need to make sure that all queries have
4209 * finished before the next fifo command. (For example on hardware
4210 * context destructions where the hardware may otherwise leak unfinished
4211 * queries).
4212 *
4213 * This function does not return any failure codes, but make attempts
4214 * to do safe unpinning in case of errors.
4215 *
4216 * The function will synchronize on the previous query barrier, and will
4217 * thus not finish until that barrier has executed.
Thomas Hellstromc0951b72012-11-20 12:19:35 +00004218 *
4219 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
4220 * before calling this function.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02004221 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

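	/*
	 * Build a validation list holding both the pinned query bo and
	 * the dummy query bo, so that both can be reserved and later
	 * fenced under the same ww acquire context.
	 */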
	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

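	/*
	 * If queries may still be outstanding, emit a dummy query as a
	 * barrier that flushes them. A caller-supplied fence implies
	 * such a barrier was already emitted.
	 */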
	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

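	/*
	 * All queries are flushed; unpin both buffers. If the caller did
	 * not supply a post-barrier fence, create one now so the buffers
	 * remain fenced until the barrier has executed.
	 */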
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
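
/*
 * Illustrative sketch, not part of the driver: a caller that already
 * holds dev_priv->cmdbuf_mutex and has emitted its own query barrier
 * (so dev_priv->query_cid_valid is false) could pass the fence issued
 * after that barrier directly. The name barrier_fence and the calling
 * context are assumptions for the example only; the caller keeps its
 * own fence reference and drops it afterwards.
 *
 *	lockdep_assert_held(&dev_priv->cmdbuf_mutex);
 *	__vmw_execbuf_release_pinned_bo(dev_priv, barrier_fence);
 *	vmw_fence_obj_unreference(&barrier_fence);
 */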

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example, on hardware
 * context destruction, where the hardware may otherwise leak unfinished
 * queries.)
 *
 * This function does not return any failure codes, but attempts
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
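
/*
 * Illustrative sketch, not part of the driver: outside the execbuf path,
 * for instance before tearing down a hardware context, the wrapper above
 * is all a caller needs; it takes cmdbuf_mutex itself and only acts when
 * there are outstanding queries to flush:
 *
 *	vmw_execbuf_release_pinned_bo(dev_priv);
 */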

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
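	/*
	 * copy_offset[v - 1] is the size of a version v argument struct:
	 * the version 1 prefix is copied first, and any version 2+ tail
	 * is copied separately once arg.version is known.
	 */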
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004333
4334 /*
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004335 * Extend the ioctl argument while
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004336 * maintaining backwards compatibility:
4337 * We take different code paths depending on the value of
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004338 * arg.version.
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004339 */
4340
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}
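
/*
 * Illustrative user-space sketch, not part of the driver: assuming the
 * uapi struct drm_vmw_execbuf_arg and the DRM_VMW_EXECBUF command from
 * vmwgfx_drm.h, plus libdrm's drmCommandWrite(), a version 2 submission
 * could look like the following. Designated initializers leave pad64
 * zero, which the version 2 path above requires. The names fd, cmd_buf,
 * cmd_size, fence_rep and ctx_handle are placeholders.
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (uintptr_t) cmd_buf,
 *		.command_size = cmd_size,
 *		.fence_rep = (uintptr_t) &fence_rep,
 *		.version = 2,
 *		.context_handle = ctx_handle,
 *	};
 *	int ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */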