/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the
 * id that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
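/*
 * A minimal usage sketch: entries built with VMW_CMD_DEF() are typically
 * collected in a table indexed by command id so the verifier can dispatch on
 * (header->id - SVGA_3D_CMD_BASE). The table name and the flag values shown
 * below are assumptions for illustration only, not taken verbatim from this
 * file:
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY,
 *			    &vmw_cmd_surface_copy_check,
 *			    true, false, false),
 *	};
 */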

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
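/*
 * Usage note: the validation code below records relocation offsets as byte
 * distances from the start of the command buffer, for example
 *
 *	offset = vmw_ptr_diff(sw_context->buf_start, id_loc);
 *
 * so that the same offset can later be applied to the copy of the buffer
 * handed to the device in vmw_resource_relocations_apply().
 */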

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(!node)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view, and the surface it's pointing to,
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(!rel)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
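/*
 * Flow sketch summarizing the helpers above: while a command is parsed, a
 * resource id referenced by the command stream is recorded as a relocation,
 * e.g.
 *
 *	vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *				    vmw_ptr_diff(sw_context->buf_start, id_loc),
 *				    vmw_res_rel_normal);
 *
 * Once all resources have been validated and assigned device ids,
 * vmw_resource_relocations_apply() patches each recorded byte offset in the
 * command buffer, or replaces the command with SVGA_3D_CMD_NOP for the NOP
 * relocation types.
 */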

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}
/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}
/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 vmw_ptr_diff(sw_context->buf_start, id_loc),
			 vmw_res_rel_normal);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

1323/**
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001324 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
1325 * handle to a valid SVGAGuestPtr
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001326 *
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001327 * @dev_priv: Pointer to a device private structure.
1328 * @sw_context: The software context used for this command batch validation.
1329 * @ptr: Pointer to the user-space handle to be translated.
1330 * @vmw_bo_p: Points to a location that, on successful return will carry
1331 * a reference-counted pointer to the DMA buffer identified by the
1332 * user-space handle in @id.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001333 *
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001334 * This function saves information needed to translate a user-space buffer
1335 * handle to a valid SVGAGuestPtr. The translation does not take place
1336 * immediately, but during a call to vmw_apply_relocations().
1337 * This function builds a relocation list and a list of buffers to validate.
1338 * The former needs to be freed using either vmw_apply_relocations() or
1339 * vmw_free_relocations(). The latter needs to be freed using
1340 * vmw_clear_validations.
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001341 */
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001342static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1343 struct vmw_sw_context *sw_context,
1344 SVGAGuestPtr *ptr,
1345 struct vmw_dma_buffer **vmw_bo_p)
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001346{
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001347 struct vmw_dma_buffer *vmw_bo = NULL;
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001348 uint32_t handle = ptr->gmrId;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001349 struct vmw_relocation *reloc;
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001350 int ret;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001351
Thomas Hellstrom54c12bc2015-09-14 01:13:11 -07001352 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1353 NULL);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001354 if (unlikely(ret != 0)) {
1355 DRM_ERROR("Could not find or use GMR region.\n");
Colin Ian Kingda5efff2015-01-22 15:17:07 +00001356 ret = -EINVAL;
1357 goto out_no_reloc;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001358 }
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001359
1360 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001361 DRM_ERROR("Max number relocations per submission"
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001362 " exceeded\n");
1363 ret = -EINVAL;
1364 goto out_no_reloc;
1365 }
1366
1367 reloc = &sw_context->relocs[sw_context->cur_reloc++];
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001368 reloc->location = ptr;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001369
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001370 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001371 if (unlikely(ret != 0))
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001372 goto out_no_reloc;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001373
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001374 *vmw_bo_p = vmw_bo;
1375 return 0;
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001376
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001377out_no_reloc:
1378 vmw_dmabuf_unreference(&vmw_bo);
Colin Ian Kingda5efff2015-01-22 15:17:07 +00001379 *vmw_bo_p = NULL;
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001380 return ret;
1381}
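/*
 * Illustrative usage sketch (not part of the driver): a command verifier
 * that only needs a guest pointer validated typically does the following;
 * vmw_cmd_check_define_gmrfb() further down is a real instance of this
 * pattern.
 *
 *	struct vmw_dma_buffer *vmw_bo;
 *	int ret;
 *
 *	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 *				      &cmd->body.ptr, &vmw_bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	vmw_dmabuf_unreference(&vmw_bo);
 *
 * The relocation and validation entries recorded by the call live on in
 * sw_context and are consumed later, during vmw_apply_relocations() and
 * buffer validation.
 */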
1382
Sinclair Yehfd11a3c2015-08-10 10:56:15 -07001383
1384
1385/**
1386 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1387 *
1388 * @dev_priv: Pointer to a device private struct.
1389 * @sw_context: The software context used for this command submission.
1390 * @header: Pointer to the command header in the command stream.
1391 *
1392 * This function adds the new query into the query COTABLE.
1393 */
1394static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1395 struct vmw_sw_context *sw_context,
1396 SVGA3dCmdHeader *header)
1397{
1398 struct vmw_dx_define_query_cmd {
1399 SVGA3dCmdHeader header;
1400 SVGA3dCmdDXDefineQuery q;
1401 } *cmd;
1402
1403 int ret;
1404 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1405 struct vmw_resource *cotable_res;
1406
1407
1408 if (ctx_node == NULL) {
1409 DRM_ERROR("DX Context not set for query.\n");
1410 return -EINVAL;
1411 }
1412
1413 cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1414
1415 if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
1416 cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1417 return -EINVAL;
1418
1419 cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1420 ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1421 vmw_resource_unreference(&cotable_res);
1422
1423 return ret;
1424}
1425
1426
1427
1428/**
1429 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1430 *
1431 * @dev_priv: Pointer to a device private struct.
1432 * @sw_context: The software context used for this command submission.
1433 * @header: Pointer to the command header in the command stream.
1434 *
1435 * The query bind operation will eventually associate the query ID
1436 * with its backing MOB. In this function, we take the user mode
1437 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1438 * kernel mode equivalent.
1439 */
1440static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1441 struct vmw_sw_context *sw_context,
1442 SVGA3dCmdHeader *header)
1443{
1444 struct vmw_dx_bind_query_cmd {
1445 SVGA3dCmdHeader header;
1446 SVGA3dCmdDXBindQuery q;
1447 } *cmd;
1448
1449 struct vmw_dma_buffer *vmw_bo;
1450 int ret;
1451
1452
1453 cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1454
1455 /*
1456 * Look up the buffer pointed to by q.mobid, put it on the relocation
1457 * list so its kernel mode MOB ID can be filled in later
1458 */
1459 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1460 &vmw_bo);
1461
1462 if (ret != 0)
1463 return ret;
1464
1465 sw_context->dx_query_mob = vmw_bo;
1466 sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1467
1468 vmw_dmabuf_unreference(&vmw_bo);
1469
1470 return ret;
1471}
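/*
 * Illustrative note (the fixup detail is an assumption based on the
 * vmw_translate_*_ptr() documentation above, not code added here): the
 * relocation recorded for cmd->q.mobid means that, once the buffer has been
 * validated, vmw_apply_relocations() overwrites the user-space handle with
 * the device-visible MOB id, conceptually
 *
 *	cmd->q.mobid = <MOB id of the validated vmw_bo>;
 *
 * so the device never sees the user-space handle.
 */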
1472
1473
1474
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001475/**
Thomas Hellstromddcda242012-11-21 11:26:55 +01001476 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
1477 *
1478 * @dev_priv: Pointer to a device private struct.
1479 * @sw_context: The software context used for this command submission.
1480 * @header: Pointer to the command header in the command stream.
1481 */
1482static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1483 struct vmw_sw_context *sw_context,
1484 SVGA3dCmdHeader *header)
1485{
1486 struct vmw_begin_gb_query_cmd {
1487 SVGA3dCmdHeader header;
1488 SVGA3dCmdBeginGBQuery q;
1489 } *cmd;
1490
1491 cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1492 header);
1493
1494 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1495 user_context_converter, &cmd->q.cid,
1496 NULL);
1497}
1498
1499/**
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001500 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
1501 *
1502 * @dev_priv: Pointer to a device private struct.
1503 * @sw_context: The software context used for this command submission.
1504 * @header: Pointer to the command header in the command stream.
1505 */
1506static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1507 struct vmw_sw_context *sw_context,
1508 SVGA3dCmdHeader *header)
1509{
1510 struct vmw_begin_query_cmd {
1511 SVGA3dCmdHeader header;
1512 SVGA3dCmdBeginQuery q;
1513 } *cmd;
1514
1515 cmd = container_of(header, struct vmw_begin_query_cmd,
1516 header);
1517
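	/*
	 * On guest-backed (MOB capable) devices, rewrite the legacy query
	 * command in place into its guest-backed equivalent and hand it to
	 * the GB-aware verifier. The End/Wait query verifiers below follow
	 * the same pattern.
	 */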
Thomas Hellstromddcda242012-11-21 11:26:55 +01001518 if (unlikely(dev_priv->has_mob)) {
1519 struct {
1520 SVGA3dCmdHeader header;
1521 SVGA3dCmdBeginGBQuery q;
1522 } gb_cmd;
1523
1524 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1525
1526 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1527 gb_cmd.header.size = cmd->header.size;
1528 gb_cmd.q.cid = cmd->q.cid;
1529 gb_cmd.q.type = cmd->q.type;
1530
1531 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1532 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1533 }
1534
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001535 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1536 user_context_converter, &cmd->q.cid,
1537 NULL);
1538}
1539
1540/**
Thomas Hellstromddcda242012-11-21 11:26:55 +01001541 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
1542 *
1543 * @dev_priv: Pointer to a device private struct.
1544 * @sw_context: The software context used for this command submission.
1545 * @header: Pointer to the command header in the command stream.
1546 */
1547static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1548 struct vmw_sw_context *sw_context,
1549 SVGA3dCmdHeader *header)
1550{
1551 struct vmw_dma_buffer *vmw_bo;
1552 struct vmw_query_cmd {
1553 SVGA3dCmdHeader header;
1554 SVGA3dCmdEndGBQuery q;
1555 } *cmd;
1556 int ret;
1557
1558 cmd = container_of(header, struct vmw_query_cmd, header);
1559 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1560 if (unlikely(ret != 0))
1561 return ret;
1562
1563 ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1564 &cmd->q.mobid,
1565 &vmw_bo);
1566 if (unlikely(ret != 0))
1567 return ret;
1568
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001569 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
Thomas Hellstromddcda242012-11-21 11:26:55 +01001570
1571 vmw_dmabuf_unreference(&vmw_bo);
1572 return ret;
1573}
1574
1575/**
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001576 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
1577 *
1578 * @dev_priv: Pointer to a device private struct.
1579 * @sw_context: The software context used for this command submission.
1580 * @header: Pointer to the command header in the command stream.
1581 */
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001582static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1583 struct vmw_sw_context *sw_context,
1584 SVGA3dCmdHeader *header)
1585{
1586 struct vmw_dma_buffer *vmw_bo;
1587 struct vmw_query_cmd {
1588 SVGA3dCmdHeader header;
1589 SVGA3dCmdEndQuery q;
1590 } *cmd;
1591 int ret;
1592
1593 cmd = container_of(header, struct vmw_query_cmd, header);
Thomas Hellstromddcda242012-11-21 11:26:55 +01001594 if (dev_priv->has_mob) {
1595 struct {
1596 SVGA3dCmdHeader header;
1597 SVGA3dCmdEndGBQuery q;
1598 } gb_cmd;
1599
1600 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1601
1602 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1603 gb_cmd.header.size = cmd->header.size;
1604 gb_cmd.q.cid = cmd->q.cid;
1605 gb_cmd.q.type = cmd->q.type;
1606 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1607 gb_cmd.q.offset = cmd->q.guestResult.offset;
1608
1609 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1610 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1611 }
1612
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001613 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1614 if (unlikely(ret != 0))
1615 return ret;
1616
1617 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1618 &cmd->q.guestResult,
1619 &vmw_bo);
1620 if (unlikely(ret != 0))
1621 return ret;
1622
Thomas Hellstrom459d0fa2015-06-26 00:25:37 -07001623 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001624
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001625 vmw_dmabuf_unreference(&vmw_bo);
Thomas Hellstrome2fa3a72011-10-04 20:13:30 +02001626 return ret;
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001627}
1628
Thomas Hellstromddcda242012-11-21 11:26:55 +01001629/**
1630 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1631 *
1632 * @dev_priv: Pointer to a device private struct.
1633 * @sw_context: The software context used for this command submission.
1634 * @header: Pointer to the command header in the command stream.
1635 */
1636static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1637 struct vmw_sw_context *sw_context,
1638 SVGA3dCmdHeader *header)
1639{
1640 struct vmw_dma_buffer *vmw_bo;
1641 struct vmw_query_cmd {
1642 SVGA3dCmdHeader header;
1643 SVGA3dCmdWaitForGBQuery q;
1644 } *cmd;
1645 int ret;
1646
1647 cmd = container_of(header, struct vmw_query_cmd, header);
1648 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1649 if (unlikely(ret != 0))
1650 return ret;
1651
1652 ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1653 &cmd->q.mobid,
1654 &vmw_bo);
1655 if (unlikely(ret != 0))
1656 return ret;
1657
1658 vmw_dmabuf_unreference(&vmw_bo);
1659 return 0;
1660}
1661
1662/**
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001663 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1664 *
1665 * @dev_priv: Pointer to a device private struct.
1666 * @sw_context: The software context used for this command submission.
1667 * @header: Pointer to the command header in the command stream.
1668 */
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001669static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1670 struct vmw_sw_context *sw_context,
1671 SVGA3dCmdHeader *header)
1672{
1673 struct vmw_dma_buffer *vmw_bo;
1674 struct vmw_query_cmd {
1675 SVGA3dCmdHeader header;
1676 SVGA3dCmdWaitForQuery q;
1677 } *cmd;
1678 int ret;
1679
1680 cmd = container_of(header, struct vmw_query_cmd, header);
Thomas Hellstromddcda242012-11-21 11:26:55 +01001681 if (dev_priv->has_mob) {
1682 struct {
1683 SVGA3dCmdHeader header;
1684 SVGA3dCmdWaitForGBQuery q;
1685 } gb_cmd;
1686
1687 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1688
1689 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1690 gb_cmd.header.size = cmd->header.size;
1691 gb_cmd.q.cid = cmd->q.cid;
1692 gb_cmd.q.type = cmd->q.type;
1693 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1694 gb_cmd.q.offset = cmd->q.guestResult.offset;
1695
1696 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1697 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1698 }
1699
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001700 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1701 if (unlikely(ret != 0))
1702 return ret;
1703
1704 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1705 &cmd->q.guestResult,
1706 &vmw_bo);
1707 if (unlikely(ret != 0))
1708 return ret;
1709
1710 vmw_dmabuf_unreference(&vmw_bo);
1711 return 0;
1712}
1713
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001714static int vmw_cmd_dma(struct vmw_private *dev_priv,
1715 struct vmw_sw_context *sw_context,
1716 SVGA3dCmdHeader *header)
1717{
1718 struct vmw_dma_buffer *vmw_bo = NULL;
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001719 struct vmw_surface *srf = NULL;
1720 struct vmw_dma_cmd {
1721 SVGA3dCmdHeader header;
1722 SVGA3dCmdSurfaceDMA dma;
1723 } *cmd;
1724 int ret;
Thomas Hellstromcbd75e92014-04-15 18:25:48 +02001725 SVGA3dCmdSurfaceDMASuffix *suffix;
1726 uint32_t bo_size;
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001727
1728 cmd = container_of(header, struct vmw_dma_cmd, header);
Thomas Hellstromcbd75e92014-04-15 18:25:48 +02001729 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1730 header->size - sizeof(*suffix));
1731
1732 /* Make sure device and verifier stays in sync. */
1733 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1734 DRM_ERROR("Invalid DMA suffix size.\n");
1735 return -EINVAL;
1736 }
1737
Thomas Hellstrom4e4ddd42010-02-21 14:54:55 +00001738 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1739 &cmd->dma.guest.ptr,
1740 &vmw_bo);
1741 if (unlikely(ret != 0))
1742 return ret;
1743
Thomas Hellstromcbd75e92014-04-15 18:25:48 +02001744 /* Make sure DMA doesn't cross BO boundaries. */
1745 bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1746 if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1747 DRM_ERROR("Invalid DMA offset.\n");
1748 return -EINVAL;
1749 }
1750
1751 bo_size -= cmd->dma.guest.ptr.offset;
1752 if (unlikely(suffix->maximumOffset > bo_size))
1753 suffix->maximumOffset = bo_size;
1754
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001755 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1756 user_surface_converter, &cmd->dma.host.sid,
1757 NULL);
Thomas Hellstrom5bb39e82011-10-04 20:13:33 +02001758 if (unlikely(ret != 0)) {
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001759 if (unlikely(ret != -ERESTARTSYS))
1760 DRM_ERROR("could not find surface for DMA.\n");
1761 goto out_no_surface;
Thomas Hellstrom5bb39e82011-10-04 20:13:33 +02001762 }
1763
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001764 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00001765
Thomas Hellstromd5bde952014-01-31 10:12:10 +01001766 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1767 header);
Thomas Hellstrombe38ab62011-08-31 07:42:54 +00001768
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001769out_no_surface:
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00001770 vmw_dmabuf_unreference(&vmw_bo);
1771 return ret;
1772}
1773
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01001774static int vmw_cmd_draw(struct vmw_private *dev_priv,
1775 struct vmw_sw_context *sw_context,
1776 SVGA3dCmdHeader *header)
1777{
1778 struct vmw_draw_cmd {
1779 SVGA3dCmdHeader header;
1780 SVGA3dCmdDrawPrimitives body;
1781 } *cmd;
1782 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1783 (unsigned long)header + sizeof(*cmd));
1784 SVGA3dPrimitiveRange *range;
1785 uint32_t i;
1786 uint32_t maxnum;
1787 int ret;
1788
1789 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1790 if (unlikely(ret != 0))
1791 return ret;
1792
1793 cmd = container_of(header, struct vmw_draw_cmd, header);
1794 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1795
1796 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1797 DRM_ERROR("Illegal number of vertex declarations.\n");
1798 return -EINVAL;
1799 }
1800
1801 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001802 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1803 user_surface_converter,
1804 &decl->array.surfaceId, NULL);
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01001805 if (unlikely(ret != 0))
1806 return ret;
1807 }
1808
1809 maxnum = (header->size - sizeof(cmd->body) -
1810 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1811 if (unlikely(cmd->body.numRanges > maxnum)) {
1812 DRM_ERROR("Illegal number of index ranges.\n");
1813 return -EINVAL;
1814 }
1815
1816 range = (SVGA3dPrimitiveRange *) decl;
1817 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001818 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1819 user_surface_converter,
1820 &range->indexArray.surfaceId, NULL);
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01001821 if (unlikely(ret != 0))
1822 return ret;
1823 }
1824 return 0;
1825}
1826
1827
1828static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1829 struct vmw_sw_context *sw_context,
1830 SVGA3dCmdHeader *header)
1831{
1832 struct vmw_tex_state_cmd {
1833 SVGA3dCmdHeader header;
1834 SVGA3dCmdSetTextureState state;
Thomas Hellstromb5c3b1a62013-10-08 02:27:17 -07001835 } *cmd;
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01001836
1837 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1838 ((unsigned long) header + header->size + sizeof(header));
1839 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1840 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
Thomas Hellstromb5c3b1a62013-10-08 02:27:17 -07001841 struct vmw_resource_val_node *ctx_node;
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07001842 struct vmw_resource_val_node *res_node;
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01001843 int ret;
1844
Thomas Hellstromb5c3b1a62013-10-08 02:27:17 -07001845 cmd = container_of(header, struct vmw_tex_state_cmd,
1846 header);
1847
1848 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1849 user_context_converter, &cmd->state.cid,
1850 &ctx_node);
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01001851 if (unlikely(ret != 0))
1852 return ret;
1853
1854 for (; cur_state < last_state; ++cur_state) {
1855 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1856 continue;
1857
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001858 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1859 DRM_ERROR("Illegal texture/sampler unit %u.\n",
1860 (unsigned) cur_state->stage);
1861 return -EINVAL;
1862 }
1863
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001864 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1865 user_surface_converter,
Thomas Hellstrom173fb7d2013-10-08 02:32:36 -07001866 &cur_state->value, &res_node);
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01001867 if (unlikely(ret != 0))
1868 return ret;
Thomas Hellstromb5c3b1a62013-10-08 02:27:17 -07001869
1870 if (dev_priv->has_mob) {
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001871 struct vmw_ctx_bindinfo_tex binding;
Thomas Hellstromb5c3b1a62013-10-08 02:27:17 -07001872
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001873 binding.bi.ctx = ctx_node->res;
1874 binding.bi.res = res_node ? res_node->res : NULL;
1875 binding.bi.bt = vmw_ctx_binding_tex;
1876 binding.texture_stage = cur_state->stage;
1877 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1878 0, binding.texture_stage);
Thomas Hellstromb5c3b1a62013-10-08 02:27:17 -07001879 }
Thomas Hellstrom7a73ba72009-12-22 16:53:41 +01001880 }
1881
1882 return 0;
1883}
1884
Jakob Bornecrantz4084fb82011-10-04 20:13:19 +02001885static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1886 struct vmw_sw_context *sw_context,
1887 void *buf)
1888{
1889 struct vmw_dma_buffer *vmw_bo;
1890 int ret;
1891
1892 struct {
1893 uint32_t header;
1894 SVGAFifoCmdDefineGMRFB body;
1895 } *cmd = buf;
1896
1897 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1898 &cmd->body.ptr,
1899 &vmw_bo);
1900 if (unlikely(ret != 0))
1901 return ret;
1902
1903 vmw_dmabuf_unreference(&vmw_bo);
1904
1905 return ret;
1906}
1907
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001908
1909/**
1910 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1911 * switching
1912 *
1913 * @dev_priv: Pointer to a device private struct.
1914 * @sw_context: The software context being used for this batch.
1915 * @val_node: The validation node representing the resource.
1916 * @buf_id: Pointer to the user-space backup buffer handle in the command
1917 * stream.
1918 * @backup_offset: Offset of backup into MOB.
1919 *
1920 * This function prepares for registering a switch of backup buffers
1921 * in the resource metadata just prior to unreserving. The new backup
1922 * buffer is looked up from @buf_id and added to the validation list.
1923 */
1924static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1925 struct vmw_sw_context *sw_context,
1926 struct vmw_resource_val_node *val_node,
1927 uint32_t *buf_id,
1928 unsigned long backup_offset)
1929{
1930 struct vmw_dma_buffer *dma_buf;
1931 int ret;
1932
1933 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1934 if (ret)
1935 return ret;
1936
1937 val_node->switching_backup = true;
1938 if (val_node->first_usage)
1939 val_node->no_buffer_needed = true;
1940
1941 vmw_dmabuf_unreference(&val_node->new_backup);
1942 val_node->new_backup = dma_buf;
1943 val_node->new_backup_offset = backup_offset;
1944
1945 return 0;
1946}
1947
1948
Thomas Hellstromc0951b72012-11-20 12:19:35 +00001949/**
Thomas Hellstroma97e2192012-11-21 11:45:13 +01001950 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1951 *
1952 * @dev_priv: Pointer to a device private struct.
1953 * @sw_context: The software context being used for this batch.
1954 * @res_type: The resource type.
1955 * @converter: Information about user-space binding for this resource type.
1956 * @res_id: Pointer to the user-space resource handle in the command stream.
1957 * @buf_id: Pointer to the user-space backup buffer handle in the command
1958 * stream.
1959 * @backup_offset: Offset of backup into MOB.
1960 *
1961 * This function prepares for registering a switch of backup buffers
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001962 * in the resource metadata just prior to unreserving. It's basically a wrapper
1963 * around vmw_cmd_res_switch_backup with a different interface.
Thomas Hellstroma97e2192012-11-21 11:45:13 +01001964 */
1965static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1966 struct vmw_sw_context *sw_context,
1967 enum vmw_res_type res_type,
1968 const struct vmw_user_resource_conv
1969 *converter,
1970 uint32_t *res_id,
1971 uint32_t *buf_id,
1972 unsigned long backup_offset)
1973{
Thomas Hellstroma97e2192012-11-21 11:45:13 +01001974 struct vmw_resource_val_node *val_node;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001975 int ret;
Thomas Hellstroma97e2192012-11-21 11:45:13 +01001976
1977 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1978 converter, res_id, &val_node);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001979 if (ret)
Thomas Hellstroma97e2192012-11-21 11:45:13 +01001980 return ret;
1981
Thomas Hellstromd80efd52015-08-10 10:39:35 -07001982 return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1983 buf_id, backup_offset);
Thomas Hellstroma97e2192012-11-21 11:45:13 +01001984}
1985
1986/**
1987 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1988 * command
1989 *
1990 * @dev_priv: Pointer to a device private struct.
1991 * @sw_context: The software context being used for this batch.
1992 * @header: Pointer to the command header in the command stream.
1993 */
1994static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1995 struct vmw_sw_context *sw_context,
1996 SVGA3dCmdHeader *header)
1997{
1998 struct vmw_bind_gb_surface_cmd {
1999 SVGA3dCmdHeader header;
2000 SVGA3dCmdBindGBSurface body;
2001 } *cmd;
2002
2003 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
2004
2005 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
2006 user_surface_converter,
2007 &cmd->body.sid, &cmd->body.mobid,
2008 0);
2009}
2010
2011/**
2012 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
2013 * command
2014 *
2015 * @dev_priv: Pointer to a device private struct.
2016 * @sw_context: The software context being used for this batch.
2017 * @header: Pointer to the command header in the command stream.
2018 */
2019static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
2020 struct vmw_sw_context *sw_context,
2021 SVGA3dCmdHeader *header)
2022{
2023 struct vmw_gb_surface_cmd {
2024 SVGA3dCmdHeader header;
2025 SVGA3dCmdUpdateGBImage body;
2026 } *cmd;
2027
2028 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2029
2030 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2031 user_surface_converter,
2032 &cmd->body.image.sid, NULL);
2033}
2034
2035/**
2036 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
2037 * command
2038 *
2039 * @dev_priv: Pointer to a device private struct.
2040 * @sw_context: The software context being used for this batch.
2041 * @header: Pointer to the command header in the command stream.
2042 */
2043static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
2044 struct vmw_sw_context *sw_context,
2045 SVGA3dCmdHeader *header)
2046{
2047 struct vmw_gb_surface_cmd {
2048 SVGA3dCmdHeader header;
2049 SVGA3dCmdUpdateGBSurface body;
2050 } *cmd;
2051
2052 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2053
2054 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2055 user_surface_converter,
2056 &cmd->body.sid, NULL);
2057}
2058
2059/**
2060 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2061 * command
2062 *
2063 * @dev_priv: Pointer to a device private struct.
2064 * @sw_context: The software context being used for this batch.
2065 * @header: Pointer to the command header in the command stream.
2066 */
2067static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2068 struct vmw_sw_context *sw_context,
2069 SVGA3dCmdHeader *header)
2070{
2071 struct vmw_gb_surface_cmd {
2072 SVGA3dCmdHeader header;
2073 SVGA3dCmdReadbackGBImage body;
2074 } *cmd;
2075
2076 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2077
2078 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2079 user_surface_converter,
2080 &cmd->body.image.sid, NULL);
2081}
2082
2083/**
2084 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2085 * command
2086 *
2087 * @dev_priv: Pointer to a device private struct.
2088 * @sw_context: The software context being used for this batch.
2089 * @header: Pointer to the command header in the command stream.
2090 */
2091static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2092 struct vmw_sw_context *sw_context,
2093 SVGA3dCmdHeader *header)
2094{
2095 struct vmw_gb_surface_cmd {
2096 SVGA3dCmdHeader header;
2097 SVGA3dCmdReadbackGBSurface body;
2098 } *cmd;
2099
2100 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2101
2102 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2103 user_surface_converter,
2104 &cmd->body.sid, NULL);
2105}
2106
2107/**
2108 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2109 * command
2110 *
2111 * @dev_priv: Pointer to a device private struct.
2112 * @sw_context: The software context being used for this batch.
2113 * @header: Pointer to the command header in the command stream.
2114 */
2115static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2116 struct vmw_sw_context *sw_context,
2117 SVGA3dCmdHeader *header)
2118{
2119 struct vmw_gb_surface_cmd {
2120 SVGA3dCmdHeader header;
2121 SVGA3dCmdInvalidateGBImage body;
2122 } *cmd;
2123
2124 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2125
2126 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2127 user_surface_converter,
2128 &cmd->body.image.sid, NULL);
2129}
2130
2131/**
2132 * vmw_cmd_invalidate_gb_surface - Validate an
2133 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2134 *
2135 * @dev_priv: Pointer to a device private struct.
2136 * @sw_context: The software context being used for this batch.
2137 * @header: Pointer to the command header in the command stream.
2138 */
2139static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2140 struct vmw_sw_context *sw_context,
2141 SVGA3dCmdHeader *header)
2142{
2143 struct vmw_gb_surface_cmd {
2144 SVGA3dCmdHeader header;
2145 SVGA3dCmdInvalidateGBSurface body;
2146 } *cmd;
2147
2148 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2149
2150 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2151 user_surface_converter,
2152 &cmd->body.sid, NULL);
2153}
2154
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002155
2156/**
2157 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2158 * command
2159 *
2160 * @dev_priv: Pointer to a device private struct.
2161 * @sw_context: The software context being used for this batch.
2162 * @header: Pointer to the command header in the command stream.
2163 */
2164static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2165 struct vmw_sw_context *sw_context,
2166 SVGA3dCmdHeader *header)
2167{
2168 struct vmw_shader_define_cmd {
2169 SVGA3dCmdHeader header;
2170 SVGA3dCmdDefineShader body;
2171 } *cmd;
2172 int ret;
2173 size_t size;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002174 struct vmw_resource_val_node *val;
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002175
2176 cmd = container_of(header, struct vmw_shader_define_cmd,
2177 header);
2178
2179 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2180 user_context_converter, &cmd->body.cid,
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002181 &val);
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002182 if (unlikely(ret != 0))
2183 return ret;
2184
2185 if (unlikely(!dev_priv->has_mob))
2186 return 0;
2187
2188 size = cmd->header.size - sizeof(cmd->body);
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002189 ret = vmw_compat_shader_add(dev_priv,
2190 vmw_context_res_man(val->res),
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002191 cmd->body.shid, cmd + 1,
2192 cmd->body.type, size,
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002193 &sw_context->staged_cmd_res);
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002194 if (unlikely(ret != 0))
2195 return ret;
2196
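	/*
	 * The shader body has already been captured by the compat shader
	 * manager above, so record a relocation that replaces this legacy
	 * define command with a NOP before the stream reaches the device.
	 */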
2197 return vmw_resource_relocation_add(&sw_context->res_relocations,
Thomas Hellstrome7a45282016-10-10 10:44:00 -07002198 NULL,
2199 vmw_ptr_diff(sw_context->buf_start,
Thomas Hellstroma1944032016-10-10 11:06:45 -07002200 &cmd->header.id),
2201 vmw_res_rel_nop);
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002202}
2203
2204/**
2205 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2206 * command
2207 *
2208 * @dev_priv: Pointer to a device private struct.
2209 * @sw_context: The software context being used for this batch.
2210 * @header: Pointer to the command header in the command stream.
2211 */
2212static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2213 struct vmw_sw_context *sw_context,
2214 SVGA3dCmdHeader *header)
2215{
2216 struct vmw_shader_destroy_cmd {
2217 SVGA3dCmdHeader header;
2218 SVGA3dCmdDestroyShader body;
2219 } *cmd;
2220 int ret;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002221 struct vmw_resource_val_node *val;
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002222
2223 cmd = container_of(header, struct vmw_shader_destroy_cmd,
2224 header);
2225
2226 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2227 user_context_converter, &cmd->body.cid,
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002228 &val);
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002229 if (unlikely(ret != 0))
2230 return ret;
2231
2232 if (unlikely(!dev_priv->has_mob))
2233 return 0;
2234
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002235 ret = vmw_shader_remove(vmw_context_res_man(val->res),
2236 cmd->body.shid,
2237 cmd->body.type,
2238 &sw_context->staged_cmd_res);
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002239 if (unlikely(ret != 0))
2240 return ret;
2241
2242 return vmw_resource_relocation_add(&sw_context->res_relocations,
Thomas Hellstrome7a45282016-10-10 10:44:00 -07002243 NULL,
2244 vmw_ptr_diff(sw_context->buf_start,
Thomas Hellstroma1944032016-10-10 11:06:45 -07002245 &cmd->header.id),
2246 vmw_res_rel_nop);
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002247}
2248
Thomas Hellstroma97e2192012-11-21 11:45:13 +01002249/**
Thomas Hellstromc0951b72012-11-20 12:19:35 +00002250 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2251 * command
2252 *
2253 * @dev_priv: Pointer to a device private struct.
2254 * @sw_context: The software context being used for this batch.
2255 * @header: Pointer to the command header in the command stream.
2256 */
2257static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2258 struct vmw_sw_context *sw_context,
2259 SVGA3dCmdHeader *header)
2260{
2261 struct vmw_set_shader_cmd {
2262 SVGA3dCmdHeader header;
2263 SVGA3dCmdSetShader body;
2264 } *cmd;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002265 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002266 struct vmw_ctx_bindinfo_shader binding;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002267 struct vmw_resource *res = NULL;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00002268 int ret;
2269
2270 cmd = container_of(header, struct vmw_set_shader_cmd,
2271 header);
2272
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002273 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2274 DRM_ERROR("Illegal shader type %u.\n",
2275 (unsigned) cmd->body.type);
2276 return -EINVAL;
2277 }
2278
Thomas Hellstromb5c3b1a62013-10-08 02:27:17 -07002279 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2280 user_context_converter, &cmd->body.cid,
2281 &ctx_node);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00002282 if (unlikely(ret != 0))
2283 return ret;
2284
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002285 if (!dev_priv->has_mob)
2286 return 0;
Thomas Hellstromc74c1622012-11-21 12:10:26 +01002287
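	/*
	 * Prefer a shader set up through the legacy SHADER_DEFINE path and
	 * kept in the context's shader manager; if none is found, fall back
	 * to a regular user-space shader resource lookup below.
	 */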
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002288 if (cmd->body.shid != SVGA3D_INVALID_ID) {
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002289 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2290 cmd->body.shid,
2291 cmd->body.type);
Thomas Hellstromd5bde952014-01-31 10:12:10 +01002292
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002293 if (!IS_ERR(res)) {
2294 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002295 &cmd->body.shid, res,
2296 &res_node);
2297 vmw_resource_unreference(&res);
2298 if (unlikely(ret != 0))
2299 return ret;
2300 }
Thomas Hellstromb5c3b1a62013-10-08 02:27:17 -07002301 }
Thomas Hellstromc74c1622012-11-21 12:10:26 +01002302
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02002303 if (!res_node) {
2304 ret = vmw_cmd_res_check(dev_priv, sw_context,
2305 vmw_res_shader,
2306 user_shader_converter,
2307 &cmd->body.shid, &res_node);
2308 if (unlikely(ret != 0))
2309 return ret;
2310 }
2311
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002312 binding.bi.ctx = ctx_node->res;
2313 binding.bi.res = res_node ? res_node->res : NULL;
2314 binding.bi.bt = vmw_ctx_binding_shader;
2315 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2316 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2317 binding.shader_slot, 0);
2318 return 0;
Thomas Hellstromc0951b72012-11-20 12:19:35 +00002319}
2320
Thomas Hellstromc74c1622012-11-21 12:10:26 +01002321/**
Thomas Hellstrom0ccbbae2014-01-30 11:13:43 +01002322 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2323 * command
2324 *
2325 * @dev_priv: Pointer to a device private struct.
2326 * @sw_context: The software context being used for this batch.
2327 * @header: Pointer to the command header in the command stream.
2328 */
2329static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2330 struct vmw_sw_context *sw_context,
2331 SVGA3dCmdHeader *header)
2332{
2333 struct vmw_set_shader_const_cmd {
2334 SVGA3dCmdHeader header;
2335 SVGA3dCmdSetShaderConst body;
2336 } *cmd;
2337 int ret;
2338
2339 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2340 header);
2341
2342 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2343 user_context_converter, &cmd->body.cid,
2344 NULL);
2345 if (unlikely(ret != 0))
2346 return ret;
2347
2348 if (dev_priv->has_mob)
2349 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2350
2351 return 0;
2352}
2353
2354/**
Thomas Hellstromc74c1622012-11-21 12:10:26 +01002355 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2356 * command
2357 *
2358 * @dev_priv: Pointer to a device private struct.
2359 * @sw_context: The software context being used for this batch.
2360 * @header: Pointer to the command header in the command stream.
2361 */
2362static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2363 struct vmw_sw_context *sw_context,
2364 SVGA3dCmdHeader *header)
2365{
2366 struct vmw_bind_gb_shader_cmd {
2367 SVGA3dCmdHeader header;
2368 SVGA3dCmdBindGBShader body;
2369 } *cmd;
2370
2371 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2372 header);
2373
2374 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2375 user_shader_converter,
2376 &cmd->body.shid, &cmd->body.mobid,
2377 cmd->body.offsetInBytes);
2378}
2379
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002380/**
2381 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2382 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2383 *
2384 * @dev_priv: Pointer to a device private struct.
2385 * @sw_context: The software context being used for this batch.
2386 * @header: Pointer to the command header in the command stream.
2387 */
2388static int
2389vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2390 struct vmw_sw_context *sw_context,
2391 SVGA3dCmdHeader *header)
2392{
2393 struct {
2394 SVGA3dCmdHeader header;
2395 SVGA3dCmdDXSetSingleConstantBuffer body;
2396 } *cmd;
2397 struct vmw_resource_val_node *res_node = NULL;
2398 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2399 struct vmw_ctx_bindinfo_cb binding;
2400 int ret;
2401
2402 if (unlikely(ctx_node == NULL)) {
2403 DRM_ERROR("DX Context not set.\n");
2404 return -EINVAL;
2405 }
2406
2407 cmd = container_of(header, typeof(*cmd), header);
2408 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2409 user_surface_converter,
2410 &cmd->body.sid, &res_node);
2411 if (unlikely(ret != 0))
2412 return ret;
2413
2414 binding.bi.ctx = ctx_node->res;
2415 binding.bi.res = res_node ? res_node->res : NULL;
2416 binding.bi.bt = vmw_ctx_binding_cb;
2417 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2418 binding.offset = cmd->body.offsetInBytes;
2419 binding.size = cmd->body.sizeInBytes;
2420 binding.slot = cmd->body.slot;
2421
2422 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2423 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2424 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2425 (unsigned) cmd->body.type,
2426 (unsigned) binding.slot);
2427 return -EINVAL;
2428 }
2429
2430 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2431 binding.shader_slot, binding.slot);
2432
2433 return 0;
2434}
2435
2436/**
2437 * vmw_cmd_dx_set_shader_res - Validate an
2438 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2439 *
2440 * @dev_priv: Pointer to a device private struct.
2441 * @sw_context: The software context being used for this batch.
2442 * @header: Pointer to the command header in the command stream.
2443 */
2444static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2445 struct vmw_sw_context *sw_context,
2446 SVGA3dCmdHeader *header)
2447{
2448 struct {
2449 SVGA3dCmdHeader header;
2450 SVGA3dCmdDXSetShaderResources body;
2451 } *cmd = container_of(header, typeof(*cmd), header);
2452 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2453 sizeof(SVGA3dShaderResourceViewId);
2454
2455 if ((u64) cmd->body.startView + (u64) num_sr_view >
2456 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2457 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2458 DRM_ERROR("Invalid shader binding.\n");
2459 return -EINVAL;
2460 }
2461
2462 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2463 vmw_ctx_binding_sr,
2464 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2465 (void *) &cmd[1], num_sr_view,
2466 cmd->body.startView);
2467}
2468
2469/**
2470 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2471 * command
2472 *
2473 * @dev_priv: Pointer to a device private struct.
2474 * @sw_context: The software context being used for this batch.
2475 * @header: Pointer to the command header in the command stream.
2476 */
2477static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2478 struct vmw_sw_context *sw_context,
2479 SVGA3dCmdHeader *header)
2480{
2481 struct {
2482 SVGA3dCmdHeader header;
2483 SVGA3dCmdDXSetShader body;
2484 } *cmd;
2485 struct vmw_resource *res = NULL;
2486 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2487 struct vmw_ctx_bindinfo_shader binding;
2488 int ret = 0;
2489
2490 if (unlikely(ctx_node == NULL)) {
2491 DRM_ERROR("DX Context not set.\n");
2492 return -EINVAL;
2493 }
2494
2495 cmd = container_of(header, typeof(*cmd), header);
2496
2497 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2498 DRM_ERROR("Illegal shader type %u.\n",
2499 (unsigned) cmd->body.type);
2500 return -EINVAL;
2501 }
2502
2503 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2504 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2505 if (IS_ERR(res)) {
2506 DRM_ERROR("Could not find shader for binding.\n");
2507 return PTR_ERR(res);
2508 }
2509
2510 ret = vmw_resource_val_add(sw_context, res, NULL);
2511 if (ret)
2512 goto out_unref;
2513 }
2514
2515 binding.bi.ctx = ctx_node->res;
2516 binding.bi.res = res;
2517 binding.bi.bt = vmw_ctx_binding_dx_shader;
2518 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2519
2520 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2521 binding.shader_slot, 0);
2522out_unref:
2523 if (res)
2524 vmw_resource_unreference(&res);
2525
2526 return ret;
2527}
2528
2529/**
2530 * vmw_cmd_dx_set_vertex_buffers - Validates an
2531 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2532 *
2533 * @dev_priv: Pointer to a device private struct.
2534 * @sw_context: The software context being used for this batch.
2535 * @header: Pointer to the command header in the command stream.
2536 */
2537static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2538 struct vmw_sw_context *sw_context,
2539 SVGA3dCmdHeader *header)
2540{
2541 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2542 struct vmw_ctx_bindinfo_vb binding;
2543 struct vmw_resource_val_node *res_node;
2544 struct {
2545 SVGA3dCmdHeader header;
2546 SVGA3dCmdDXSetVertexBuffers body;
2547 SVGA3dVertexBuffer buf[];
2548 } *cmd;
2549 int i, ret, num;
2550
2551 if (unlikely(ctx_node == NULL)) {
2552 DRM_ERROR("DX Context not set.\n");
2553 return -EINVAL;
2554 }
2555
2556 cmd = container_of(header, typeof(*cmd), header);
2557 num = (cmd->header.size - sizeof(cmd->body)) /
2558 sizeof(SVGA3dVertexBuffer);
2559 if ((u64)num + (u64)cmd->body.startBuffer >
2560 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2561 DRM_ERROR("Invalid number of vertex buffers.\n");
2562 return -EINVAL;
2563 }
2564
2565 for (i = 0; i < num; i++) {
2566 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2567 user_surface_converter,
2568 &cmd->buf[i].sid, &res_node);
2569 if (unlikely(ret != 0))
2570 return ret;
2571
2572 binding.bi.ctx = ctx_node->res;
2573 binding.bi.bt = vmw_ctx_binding_vb;
2574 binding.bi.res = ((res_node) ? res_node->res : NULL);
2575 binding.offset = cmd->buf[i].offset;
2576 binding.stride = cmd->buf[i].stride;
2577 binding.slot = i + cmd->body.startBuffer;
2578
2579 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2580 0, binding.slot);
2581 }
2582
2583 return 0;
2584}
2585
2586/**
2587 * vmw_cmd_dx_set_index_buffer - Validate an
Brian Paul8bd62872017-07-17 07:36:10 -07002588 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002589 *
2590 * @dev_priv: Pointer to a device private struct.
2591 * @sw_context: The software context being used for this batch.
2592 * @header: Pointer to the command header in the command stream.
2593 */
2594static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2595 struct vmw_sw_context *sw_context,
2596 SVGA3dCmdHeader *header)
2597{
2598 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2599 struct vmw_ctx_bindinfo_ib binding;
2600 struct vmw_resource_val_node *res_node;
2601 struct {
2602 SVGA3dCmdHeader header;
2603 SVGA3dCmdDXSetIndexBuffer body;
2604 } *cmd;
2605 int ret;
2606
2607 if (unlikely(ctx_node == NULL)) {
2608 DRM_ERROR("DX Context not set.\n");
2609 return -EINVAL;
2610 }
2611
2612 cmd = container_of(header, typeof(*cmd), header);
2613 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2614 user_surface_converter,
2615 &cmd->body.sid, &res_node);
2616 if (unlikely(ret != 0))
2617 return ret;
2618
2619 binding.bi.ctx = ctx_node->res;
2620 binding.bi.res = ((res_node) ? res_node->res : NULL);
2621 binding.bi.bt = vmw_ctx_binding_ib;
2622 binding.offset = cmd->body.offset;
2623 binding.format = cmd->body.format;
2624
2625 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2626
2627 return 0;
2628}
2629
2630/**
2631 * vmw_cmd_dx_set_rendertargets - Validate an
2632 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2633 *
2634 * @dev_priv: Pointer to a device private struct.
2635 * @sw_context: The software context being used for this batch.
2636 * @header: Pointer to the command header in the command stream.
2637 */
2638static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2639 struct vmw_sw_context *sw_context,
2640 SVGA3dCmdHeader *header)
2641{
2642 struct {
2643 SVGA3dCmdHeader header;
2644 SVGA3dCmdDXSetRenderTargets body;
2645 } *cmd = container_of(header, typeof(*cmd), header);
2646 int ret;
2647 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2648 sizeof(SVGA3dRenderTargetViewId);
2649
2650 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2651 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2652 return -EINVAL;
2653 }
2654
2655 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2656 vmw_ctx_binding_ds, 0,
2657 &cmd->body.depthStencilViewId, 1, 0);
2658 if (ret)
2659 return ret;
2660
2661 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2662 vmw_ctx_binding_dx_rt, 0,
2663 (void *)&cmd[1], num_rt_view, 0);
2664}
2665
2666/**
2667 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2668 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2669 *
2670 * @dev_priv: Pointer to a device private struct.
2671 * @sw_context: The software context being used for this batch.
2672 * @header: Pointer to the command header in the command stream.
2673 */
2674static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2675 struct vmw_sw_context *sw_context,
2676 SVGA3dCmdHeader *header)
2677{
2678 struct {
2679 SVGA3dCmdHeader header;
2680 SVGA3dCmdDXClearRenderTargetView body;
2681 } *cmd = container_of(header, typeof(*cmd), header);
2682
2683 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2684 cmd->body.renderTargetViewId);
2685}
2686
2687/**
2688 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2689 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2690 *
2691 * @dev_priv: Pointer to a device private struct.
2692 * @sw_context: The software context being used for this batch.
2693 * @header: Pointer to the command header in the command stream.
2694 */
2695static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2696 struct vmw_sw_context *sw_context,
2697 SVGA3dCmdHeader *header)
2698{
2699 struct {
2700 SVGA3dCmdHeader header;
2701 SVGA3dCmdDXClearDepthStencilView body;
2702 } *cmd = container_of(header, typeof(*cmd), header);
2703
2704 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2705 cmd->body.depthStencilViewId);
2706}
2707
2708static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2709 struct vmw_sw_context *sw_context,
2710 SVGA3dCmdHeader *header)
2711{
2712 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2713 struct vmw_resource_val_node *srf_node;
2714 struct vmw_resource *res;
2715 enum vmw_view_type view_type;
2716 int ret;
2717 /*
2718 * This is based on the fact that all affected define commands have
2719 * the same initial command body layout.
2720 */
2721 struct {
2722 SVGA3dCmdHeader header;
2723 uint32 defined_id;
2724 uint32 sid;
2725 } *cmd;
2726
2727 if (unlikely(ctx_node == NULL)) {
2728 DRM_ERROR("DX Context not set.\n");
2729 return -EINVAL;
2730 }
2731
2732 view_type = vmw_view_cmd_to_type(header->id);
2733 cmd = container_of(header, typeof(*cmd), header);
2734 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2735 user_surface_converter,
2736 &cmd->sid, &srf_node);
2737 if (unlikely(ret != 0))
2738 return ret;
2739
2740 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2741 ret = vmw_cotable_notify(res, cmd->defined_id);
2742 vmw_resource_unreference(&res);
2743 if (unlikely(ret != 0))
2744 return ret;
2745
2746 return vmw_view_add(sw_context->man,
2747 ctx_node->res,
2748 srf_node->res,
2749 view_type,
2750 cmd->defined_id,
2751 header,
2752 header->size + sizeof(*header),
2753 &sw_context->staged_cmd_res);
2754}
2755
Charmaine Lee2f633e52015-08-10 10:45:11 -07002756/**
2757 * vmw_cmd_dx_set_so_targets - Validate an
2758 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2759 *
2760 * @dev_priv: Pointer to a device private struct.
2761 * @sw_context: The software context being used for this batch.
2762 * @header: Pointer to the command header in the command stream.
2763 */
2764static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2765 struct vmw_sw_context *sw_context,
2766 SVGA3dCmdHeader *header)
2767{
2768 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2769 struct vmw_ctx_bindinfo_so binding;
2770 struct vmw_resource_val_node *res_node;
2771 struct {
2772 SVGA3dCmdHeader header;
2773 SVGA3dCmdDXSetSOTargets body;
2774 SVGA3dSoTarget targets[];
2775 } *cmd;
2776 int i, ret, num;
2777
2778 if (unlikely(ctx_node == NULL)) {
2779 DRM_ERROR("DX Context not set.\n");
2780 return -EINVAL;
2781 }
2782
2783 cmd = container_of(header, typeof(*cmd), header);
2784 num = (cmd->header.size - sizeof(cmd->body)) /
2785 sizeof(SVGA3dSoTarget);
2786
2787 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2788 DRM_ERROR("Invalid DX SO binding.\n");
2789 return -EINVAL;
2790 }
2791
2792 for (i = 0; i < num; i++) {
2793 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2794 user_surface_converter,
2795 &cmd->targets[i].sid, &res_node);
2796 if (unlikely(ret != 0))
2797 return ret;
2798
2799 binding.bi.ctx = ctx_node->res;
2800 binding.bi.res = ((res_node) ? res_node->res : NULL);
2801 binding.bi.bt = vmw_ctx_binding_so;
2802 binding.offset = cmd->targets[i].offset;
2803 binding.size = cmd->targets[i].sizeInBytes;
2804 binding.slot = i;
2805
2806 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2807 0, binding.slot);
2808 }
2809
2810 return 0;
2811}
2812
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002813static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2814 struct vmw_sw_context *sw_context,
2815 SVGA3dCmdHeader *header)
2816{
2817 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2818 struct vmw_resource *res;
2819 /*
2820 * This is based on the fact that all affected define commands have
2821 * the same initial command body layout.
2822 */
2823 struct {
2824 SVGA3dCmdHeader header;
2825 uint32 defined_id;
2826 } *cmd;
2827 enum vmw_so_type so_type;
2828 int ret;
2829
2830 if (unlikely(ctx_node == NULL)) {
2831 DRM_ERROR("DX Context not set.\n");
2832 return -EINVAL;
2833 }
2834
2835 so_type = vmw_so_cmd_to_type(header->id);
2836 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2837 cmd = container_of(header, typeof(*cmd), header);
2838 ret = vmw_cotable_notify(res, cmd->defined_id);
2839 vmw_resource_unreference(&res);
2840
2841 return ret;
2842}
2843
2844/**
2845 * vmw_cmd_dx_check_subresource - Validate an
2846 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2847 *
2848 * @dev_priv: Pointer to a device private struct.
2849 * @sw_context: The software context being used for this batch.
2850 * @header: Pointer to the command header in the command stream.
2851 */
2852static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2853 struct vmw_sw_context *sw_context,
2854 SVGA3dCmdHeader *header)
2855{
2856 struct {
2857 SVGA3dCmdHeader header;
2858 union {
2859 SVGA3dCmdDXReadbackSubResource r_body;
2860 SVGA3dCmdDXInvalidateSubResource i_body;
2861 SVGA3dCmdDXUpdateSubResource u_body;
2862 SVGA3dSurfaceId sid;
2863 };
2864 } *cmd;
2865
2866 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2867 offsetof(typeof(*cmd), sid));
2868 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2869 offsetof(typeof(*cmd), sid));
2870 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2871 offsetof(typeof(*cmd), sid));
2872
2873 cmd = container_of(header, typeof(*cmd), header);
2874
2875 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2876 user_surface_converter,
2877 &cmd->sid, NULL);
2878}
2879
2880static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2881 struct vmw_sw_context *sw_context,
2882 SVGA3dCmdHeader *header)
2883{
2884 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2885
2886 if (unlikely(ctx_node == NULL)) {
2887 DRM_ERROR("DX Context not set.\n");
2888 return -EINVAL;
2889 }
2890
2891 return 0;
2892}
2893
2894/**
2895 * vmw_cmd_dx_view_remove - validate a view remove command and
2896 * schedule the view resource for removal.
2897 *
2898 * @dev_priv: Pointer to a device private struct.
2899 * @sw_context: The software context being used for this batch.
2900 * @header: Pointer to the command header in the command stream.
2901 *
2902 * Check that the view exists, and if it was not created using this
Thomas Hellstroma1944032016-10-10 11:06:45 -07002903 * command batch, conditionally make this command a NOP.
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002904 */
2905static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2906 struct vmw_sw_context *sw_context,
2907 SVGA3dCmdHeader *header)
2908{
2909 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2910 struct {
2911 SVGA3dCmdHeader header;
2912 union vmw_view_destroy body;
2913 } *cmd = container_of(header, typeof(*cmd), header);
2914 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2915 struct vmw_resource *view;
2916 int ret;
2917
2918 if (!ctx_node) {
2919 DRM_ERROR("DX Context not set.\n");
2920 return -EINVAL;
2921 }
2922
2923 ret = vmw_view_remove(sw_context->man,
2924 cmd->body.view_id, view_type,
2925 &sw_context->staged_cmd_res,
2926 &view);
2927 if (ret || !view)
2928 return ret;
2929
2930 /*
Thomas Hellstroma1944032016-10-10 11:06:45 -07002931 * If the view wasn't created during this command batch, it might
2932 * have been removed due to a context swapout, so add a
2933 * relocation to conditionally make this command a NOP to avoid
2934 * device errors.
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002935 */
Thomas Hellstroma1944032016-10-10 11:06:45 -07002936 return vmw_resource_relocation_add(&sw_context->res_relocations,
2937 view,
2938 vmw_ptr_diff(sw_context->buf_start,
2939 &cmd->header.id),
2940 vmw_res_rel_cond_nop);
Thomas Hellstromd80efd52015-08-10 10:39:35 -07002941}
2942
2943/**
2944 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2945 * command
2946 *
2947 * @dev_priv: Pointer to a device private struct.
2948 * @sw_context: The software context being used for this batch.
2949 * @header: Pointer to the command header in the command stream.
2950 */
2951static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2952 struct vmw_sw_context *sw_context,
2953 SVGA3dCmdHeader *header)
2954{
2955 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2956 struct vmw_resource *res;
2957 struct {
2958 SVGA3dCmdHeader header;
2959 SVGA3dCmdDXDefineShader body;
2960 } *cmd = container_of(header, typeof(*cmd), header);
2961 int ret;
2962
2963 if (!ctx_node) {
2964 DRM_ERROR("DX Context not set.\n");
2965 return -EINVAL;
2966 }
2967
2968 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2969 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2970 vmw_resource_unreference(&res);
2971 if (ret)
2972 return ret;
2973
2974 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2975 cmd->body.shaderId, cmd->body.type,
2976 &sw_context->staged_cmd_res);
2977}
2978
2979/**
2980 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2981 * command
2982 *
2983 * @dev_priv: Pointer to a device private struct.
2984 * @sw_context: The software context being used for this batch.
2985 * @header: Pointer to the command header in the command stream.
2986 */
2987static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2988 struct vmw_sw_context *sw_context,
2989 SVGA3dCmdHeader *header)
2990{
2991 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2992 struct {
2993 SVGA3dCmdHeader header;
2994 SVGA3dCmdDXDestroyShader body;
2995 } *cmd = container_of(header, typeof(*cmd), header);
2996 int ret;
2997
2998 if (!ctx_node) {
2999 DRM_ERROR("DX Context not set.\n");
3000 return -EINVAL;
3001 }
3002
3003 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
3004 &sw_context->staged_cmd_res);
3005 if (ret)
3006 DRM_ERROR("Could not find shader to remove.\n");
3007
3008 return ret;
3009}
3010
3011/**
3012 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
3013 * command
3014 *
3015 * @dev_priv: Pointer to a device private struct.
3016 * @sw_context: The software context being used for this batch.
3017 * @header: Pointer to the command header in the command stream.
3018 */
3019static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
3020 struct vmw_sw_context *sw_context,
3021 SVGA3dCmdHeader *header)
3022{
3023 struct vmw_resource_val_node *ctx_node;
3024 struct vmw_resource_val_node *res_node;
3025 struct vmw_resource *res;
3026 struct {
3027 SVGA3dCmdHeader header;
3028 SVGA3dCmdDXBindShader body;
3029 } *cmd = container_of(header, typeof(*cmd), header);
3030 int ret;
3031
3032 if (cmd->body.cid != SVGA3D_INVALID_ID) {
3033 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
3034 user_context_converter,
3035 &cmd->body.cid, &ctx_node);
3036 if (ret)
3037 return ret;
3038 } else {
3039 ctx_node = sw_context->dx_ctx_node;
3040 if (!ctx_node) {
3041 DRM_ERROR("DX Context not set.\n");
3042 return -EINVAL;
3043 }
3044 }
3045
3046 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
3047 cmd->body.shid, 0);
3048 if (IS_ERR(res)) {
3049 DRM_ERROR("Could not find shader to bind.\n");
3050 return PTR_ERR(res);
3051 }
3052
3053 ret = vmw_resource_val_add(sw_context, res, &res_node);
3054 if (ret) {
3055 DRM_ERROR("Error creating resource validation node.\n");
3056 goto out_unref;
3057 }
3058
3059
3060 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3061 &cmd->body.mobid,
3062 cmd->body.offsetInBytes);
3063out_unref:
3064 vmw_resource_unreference(&res);
3065
3066 return ret;
3067}
3068
3069/**
3070 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3071 *
3072 * @dev_priv: Pointer to a device private struct.
3073 * @sw_context: The software context being used for this batch.
3074 * @header: Pointer to the command header in the command stream.
3075 */
3076static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3077 struct vmw_sw_context *sw_context,
3078 SVGA3dCmdHeader *header)
3079{
3080 struct {
3081 SVGA3dCmdHeader header;
3082 SVGA3dCmdDXGenMips body;
3083 } *cmd = container_of(header, typeof(*cmd), header);
3084
3085 return vmw_view_id_val_add(sw_context, vmw_view_sr,
3086 cmd->body.shaderResourceViewId);
3087}
3088
3089/**
3090 * vmw_cmd_dx_transfer_from_buffer -
3091 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3092 *
3093 * @dev_priv: Pointer to a device private struct.
3094 * @sw_context: The software context being used for this batch.
3095 * @header: Pointer to the command header in the command stream.
3096 */
3097static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3098 struct vmw_sw_context *sw_context,
3099 SVGA3dCmdHeader *header)
3100{
3101 struct {
3102 SVGA3dCmdHeader header;
3103 SVGA3dCmdDXTransferFromBuffer body;
3104 } *cmd = container_of(header, typeof(*cmd), header);
3105 int ret;
3106
3107 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3108 user_surface_converter,
3109 &cmd->body.srcSid, NULL);
3110 if (ret != 0)
3111 return ret;
3112
3113 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3114 user_surface_converter,
3115 &cmd->body.destSid, NULL);
3116}
3117
3118static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3119 struct vmw_sw_context *sw_context,
3120 void *buf, uint32_t *size)
3121{
3122 uint32_t size_remaining = *size;
3123 uint32_t cmd_id;
3124
3125 cmd_id = ((uint32_t *)buf)[0];
3126 switch (cmd_id) {
3127 case SVGA_CMD_UPDATE:
3128 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3129 break;
3130 case SVGA_CMD_DEFINE_GMRFB:
3131 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3132 break;
3133 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3134 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3135 break;
3136 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3137 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3138 break;
3139 default:
3140 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3141 return -EINVAL;
3142 }
3143
3144 if (*size > size_remaining) {
3145 DRM_ERROR("Invalid SVGA command (size mismatch):"
3146 " %u.\n", cmd_id);
3147 return -EINVAL;
3148 }
3149
3150 if (unlikely(!sw_context->kernel)) {
3151 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3152 return -EPERM;
3153 }
3154
3155 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3156 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3157
3158 return 0;
3159}
3160
3161static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3162 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3163 false, false, false),
3164 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3165 false, false, false),
3166 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3167 true, false, false),
3168 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3169 true, false, false),
3170 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3171 true, false, false),
3172 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3173 false, false, false),
3174 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3175 false, false, false),
3176 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3177 true, false, false),
3178 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3179 true, false, false),
3180 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3181 true, false, false),
3182 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3183 &vmw_cmd_set_render_target_check, true, false, false),
3184 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3185 true, false, false),
3186 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3187 true, false, false),
3188 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3189 true, false, false),
3190 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3191 true, false, false),
3192 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3193 true, false, false),
3194 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3195 true, false, false),
3196 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3197 true, false, false),
3198 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3199 false, false, false),
3200 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3201 true, false, false),
3202 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3203 true, false, false),
3204 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3205 true, false, false),
3206 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3207 true, false, false),
3208 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3209 true, false, false),
3210 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3211 true, false, false),
3212 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3213 true, false, false),
3214 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3215 true, false, false),
3216 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3217 true, false, false),
3218 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3219 true, false, false),
3220 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3221 &vmw_cmd_blt_surf_screen_check, false, false, false),
3222 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3223 false, false, false),
3224 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3225 false, false, false),
3226 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3227 false, false, false),
3228 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3229 false, false, false),
3230 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3231 false, false, false),
3232 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3233 false, false, false),
3234 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3235 false, false, false),
3236 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3237 false, false, false),
3238 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3239 false, false, false),
3240 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3241 false, false, false),
3242 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3243 false, false, false),
3244 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3245 false, false, false),
3246 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3247 false, false, false),
3248 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3249 false, false, true),
3250 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3251 false, false, true),
3252 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3253 false, false, true),
3254 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3255 false, false, true),
3256 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3257 false, false, true),
3258 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3259 false, false, true),
3260 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3261 false, false, true),
3262 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3263 false, false, true),
3264 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3265 true, false, true),
3266 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3267 false, false, true),
3268 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3269 true, false, true),
3270 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3271 &vmw_cmd_update_gb_surface, true, false, true),
3272 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3273 &vmw_cmd_readback_gb_image, true, false, true),
3274 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3275 &vmw_cmd_readback_gb_surface, true, false, true),
3276 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3277 &vmw_cmd_invalidate_gb_image, true, false, true),
3278 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3279 &vmw_cmd_invalidate_gb_surface, true, false, true),
3280 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3281 false, false, true),
3282 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3283 false, false, true),
3284 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3285 false, false, true),
3286 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3287 false, false, true),
3288 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3289 false, false, true),
3290 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3291 false, false, true),
3292 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3293 true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3295 false, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3297 false, false, false),
3298 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3301 true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3303 true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3305 true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3307 false, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3309 false, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3311 false, false, true),
3312 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3313 false, false, true),
3314 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3315 false, false, true),
3316 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3317 false, false, true),
3318 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3319 false, false, true),
3320 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3321 false, false, true),
3322 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3323 false, false, true),
3324 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3325 false, false, true),
3326 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3327 true, false, true),
3328 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3329 false, false, true),
3330 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3331 false, false, true),
3332 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3333 false, false, true),
3334 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3335 false, false, true),
3336
3337 /*
3338 * DX commands
3339 */
3340 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3341 false, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3343 false, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3345 false, false, true),
3346 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3347 false, false, true),
3348 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3349 false, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3351 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3353 &vmw_cmd_dx_set_shader_res, true, false, true),
3354 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3355 true, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3357 true, false, true),
3358 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3359 true, false, true),
3360 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3361 true, false, true),
3362 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3363 true, false, true),
3364 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3365 &vmw_cmd_dx_cid_check, true, false, true),
3366 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3367 true, false, true),
3368 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3369 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3370 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3371 &vmw_cmd_dx_set_index_buffer, true, false, true),
3372 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3373 &vmw_cmd_dx_set_rendertargets, true, false, true),
3374 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3375 true, false, true),
3376 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3377 &vmw_cmd_dx_cid_check, true, false, true),
3378 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3379 &vmw_cmd_dx_cid_check, true, false, true),
3380 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3381 true, false, true),
3382 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3383 true, false, true),
3384 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3385 true, false, true),
3386 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3387 &vmw_cmd_dx_cid_check, true, false, true),
3388 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3389 true, false, true),
3390 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3391 true, false, true),
3392 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3393 true, false, true),
3394 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3395 true, false, true),
3396 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3397 true, false, true),
3398 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3399 true, false, true),
3400 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3401 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3402 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3403 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3404 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3405 true, false, true),
3406 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3407 true, false, true),
3408 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3409 &vmw_cmd_dx_check_subresource, true, false, true),
3410 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3411 &vmw_cmd_dx_check_subresource, true, false, true),
3412 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3413 &vmw_cmd_dx_check_subresource, true, false, true),
3414 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3415 &vmw_cmd_dx_view_define, true, false, true),
3416 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3417 &vmw_cmd_dx_view_remove, true, false, true),
3418 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3419 &vmw_cmd_dx_view_define, true, false, true),
3420 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3421 &vmw_cmd_dx_view_remove, true, false, true),
3422 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3423 &vmw_cmd_dx_view_define, true, false, true),
3424 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3425 &vmw_cmd_dx_view_remove, true, false, true),
3426 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3427 &vmw_cmd_dx_so_define, true, false, true),
3428 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3429 &vmw_cmd_dx_cid_check, true, false, true),
3430 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3431 &vmw_cmd_dx_so_define, true, false, true),
3432 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3433 &vmw_cmd_dx_cid_check, true, false, true),
3434 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3435 &vmw_cmd_dx_so_define, true, false, true),
3436 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3437 &vmw_cmd_dx_cid_check, true, false, true),
3438 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3439 &vmw_cmd_dx_so_define, true, false, true),
3440 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3441 &vmw_cmd_dx_cid_check, true, false, true),
3442 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3443 &vmw_cmd_dx_so_define, true, false, true),
3444 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3445 &vmw_cmd_dx_cid_check, true, false, true),
3446 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3447 &vmw_cmd_dx_define_shader, true, false, true),
3448 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3449 &vmw_cmd_dx_destroy_shader, true, false, true),
3450 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3451 &vmw_cmd_dx_bind_shader, true, false, true),
3452 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3453 &vmw_cmd_dx_so_define, true, false, true),
3454 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3455 &vmw_cmd_dx_cid_check, true, false, true),
3456 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3457 true, false, true),
3458 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3459 &vmw_cmd_dx_set_so_targets, true, false, true),
3460 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3461 &vmw_cmd_dx_cid_check, true, false, true),
3462 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3463 &vmw_cmd_dx_cid_check, true, false, true),
3464 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3465 &vmw_cmd_buffer_copy_check, true, false, true),
3466 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3467 &vmw_cmd_pred_copy_check, true, false, true),
3468 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3469 &vmw_cmd_dx_transfer_from_buffer,
3470 true, false, true),
3471};
3472
3473bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3474{
3475 u32 cmd_id = ((u32 *) buf)[0];
3476
3477 if (cmd_id >= SVGA_CMD_MAX) {
3478 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3479 const struct vmw_cmd_entry *entry;
3480
3481 *size = header->size + sizeof(SVGA3dCmdHeader);
3482 cmd_id = header->id;
3483 if (cmd_id >= SVGA_3D_CMD_MAX)
3484 return false;
3485
3486 cmd_id -= SVGA_3D_CMD_BASE;
3487 entry = &vmw_cmd_entries[cmd_id];
3488 *cmd = entry->cmd_name;
3489 return true;
3490 }
3491
3492 switch (cmd_id) {
3493 case SVGA_CMD_UPDATE:
3494 *cmd = "SVGA_CMD_UPDATE";
3495 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3496 break;
3497 case SVGA_CMD_DEFINE_GMRFB:
3498 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3499 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3500 break;
3501 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3502 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3503 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3504 break;
3505 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3506 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3507 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3508 break;
3509 default:
3510 *cmd = "UNKNOWN";
3511 *size = 0;
3512 return false;
3513 }
3514
3515 return true;
3516}
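
/*
 * Illustrative sketch (not part of the driver): a debugging helper that
 * walks a submitted command buffer with vmw_cmd_describe() and logs each
 * command name and size. The helper name and the length handling are
 * assumptions made only for this example.
 *
 *	static void vmw_example_dump_commands(const u8 *buf, u32 bytes)
 *	{
 *		const char *name;
 *		u32 size;
 *
 *		while (bytes >= sizeof(u32)) {
 *			if (!vmw_cmd_describe(buf, &size, &name) ||
 *			    size == 0 || size > bytes)
 *				break;
 *			pr_info("cmd %s, %u bytes\n", name, size);
 *			buf += size;
 *			bytes -= size;
 *		}
 *	}
 */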
3517
3518static int vmw_cmd_check(struct vmw_private *dev_priv,
3519 struct vmw_sw_context *sw_context,
3520 void *buf, uint32_t *size)
3521{
3522 uint32_t cmd_id;
3523 uint32_t size_remaining = *size;
3524 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3525 int ret;
3526 const struct vmw_cmd_entry *entry;
3527 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3528
3529 cmd_id = ((uint32_t *)buf)[0];
3530 /* Handle any non-3D commands */
3531 if (unlikely(cmd_id < SVGA_CMD_MAX))
3532 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3533
3534
3535 cmd_id = header->id;
3536 *size = header->size + sizeof(SVGA3dCmdHeader);
3537
3538 cmd_id -= SVGA_3D_CMD_BASE;
3539 if (unlikely(*size > size_remaining))
3540 goto out_invalid;
3541
3542 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3543 goto out_invalid;
3544
3545 entry = &vmw_cmd_entries[cmd_id];
3546 if (unlikely(!entry->func))
3547 goto out_invalid;
3548
3549 if (unlikely(!entry->user_allow && !sw_context->kernel))
3550 goto out_privileged;
3551
3552 if (unlikely(entry->gb_disable && gb))
3553 goto out_old;
3554
3555 if (unlikely(entry->gb_enable && !gb))
3556 goto out_new;
3557
3558 ret = entry->func(dev_priv, sw_context, header);
3559 if (unlikely(ret != 0))
3560 goto out_invalid;
3561
3562 return 0;
3563out_invalid:
3564 DRM_ERROR("Invalid SVGA3D command: %d\n",
3565 cmd_id + SVGA_3D_CMD_BASE);
3566 return -EINVAL;
3567out_privileged:
3568 DRM_ERROR("Privileged SVGA3D command: %d\n",
3569 cmd_id + SVGA_3D_CMD_BASE);
3570 return -EPERM;
3571out_old:
3572 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3573 cmd_id + SVGA_3D_CMD_BASE);
3574 return -EINVAL;
3575out_new:
3576 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3577 cmd_id + SVGA_3D_CMD_BASE);
3578 return -EINVAL;
3579}
3580
3581static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3582 struct vmw_sw_context *sw_context,
3583 void *buf,
3584 uint32_t size)
3585{
3586 int32_t cur_size = size;
3587 int ret;
3588
3589 sw_context->buf_start = buf;
3590
3591 while (cur_size > 0) {
3592 size = cur_size;
3593 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3594 if (unlikely(ret != 0))
3595 return ret;
3596 buf = (void *)((unsigned long) buf + size);
3597 cur_size -= size;
3598 }
3599
3600 if (unlikely(cur_size != 0)) {
3601 DRM_ERROR("Command verifier out of sync.\n");
3602 return -EINVAL;
3603 }
3604
3605 return 0;
3606}
3607
3608static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3609{
3610 sw_context->cur_reloc = 0;
3611}
3612
3613static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3614{
3615 uint32_t i;
3616 struct vmw_relocation *reloc;
3617 struct ttm_validate_buffer *validate;
3618 struct ttm_buffer_object *bo;
3619
3620 for (i = 0; i < sw_context->cur_reloc; ++i) {
3621 reloc = &sw_context->relocs[i];
3622 validate = &sw_context->val_bufs[reloc->index].base;
3623 bo = validate->bo;
3624 switch (bo->mem.mem_type) {
3625 case TTM_PL_VRAM:
3626 reloc->location->offset += bo->offset;
3627 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3628 break;
3629 case VMW_PL_GMR:
3630 reloc->location->gmrId = bo->mem.start;
3631 break;
3632 case VMW_PL_MOB:
3633 *reloc->mob_loc = bo->mem.start;
3634 break;
3635 default:
3636 BUG();
3637 }
3638 }
3639 vmw_free_relocations(sw_context);
3640}
3641
3642/**
3643 * vmw_resource_list_unreference - Free up a resource list and unreference
3644 * all resources referenced by it.
3645 *
3646 * @list: The resource list.
3647 */
3648static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3649 struct list_head *list)
3650{
3651 struct vmw_resource_val_node *val, *val_next;
3652
3653 /*
3654 * Drop references to resources held during command submission.
3655 */
3656
3657 list_for_each_entry_safe(val, val_next, list, head) {
3658 list_del_init(&val->head);
3659 vmw_resource_unreference(&val->res);
3660
3661 if (val->staged_bindings) {
3662 if (val->staged_bindings != sw_context->staged_bindings)
3663 vmw_binding_state_free(val->staged_bindings);
3664 else
3665 sw_context->staged_bindings_inuse = false;
3666 val->staged_bindings = NULL;
3667 }
3668
3669 kfree(val);
3670 }
3671}
3672
3673static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3674{
3675 struct vmw_validate_buffer *entry, *next;
3676 struct vmw_resource_val_node *val;
3677
3678 /*
3679 * Drop references to DMA buffers held during command submission.
3680 */
3681 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3682 base.head) {
3683 list_del(&entry->base.head);
3684 ttm_bo_unref(&entry->base.bo);
3685 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3686 sw_context->cur_val_buf--;
3687 }
3688 BUG_ON(sw_context->cur_val_buf != 0);
3689
3690 list_for_each_entry(val, &sw_context->resource_list, head)
3691 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3692}
3693
3694int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3695 struct ttm_buffer_object *bo,
3696 bool interruptible,
3697 bool validate_as_mob)
3698{
3699 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3700 base);
3701 int ret;
3702
3703 if (vbo->pin_count > 0)
3704 return 0;
3705
3706 if (validate_as_mob)
3707 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3708 false);
3709
3710 /**
3711 * Put BO in VRAM if there is space, otherwise as a GMR.
3712 * If there is no space in VRAM and GMR ids are all used up,
3713 * start evicting GMRs to make room. If the DMA buffer can't be
3714 * used as a GMR, this will return -ENOMEM.
3715 */
3716
3717 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3718 false);
3719 if (likely(ret == 0 || ret == -ERESTARTSYS))
3720 return ret;
3721
3722 /**
3723 * If that failed, try VRAM again, this time evicting
3724 * previous contents.
3725 */
3726
3727 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3728 return ret;
3729}
3730
3731static int vmw_validate_buffers(struct vmw_private *dev_priv,
3732 struct vmw_sw_context *sw_context)
3733{
3734 struct vmw_validate_buffer *entry;
3735 int ret;
3736
3737 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3738 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3739 true,
3740 entry->validate_as_mob);
3741 if (unlikely(ret != 0))
3742 return ret;
3743 }
3744 return 0;
3745}
3746
3747static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3748 uint32_t size)
3749{
3750 if (likely(sw_context->cmd_bounce_size >= size))
3751 return 0;
3752
3753 if (sw_context->cmd_bounce_size == 0)
3754 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3755
3756 while (sw_context->cmd_bounce_size < size) {
3757 sw_context->cmd_bounce_size =
3758 PAGE_ALIGN(sw_context->cmd_bounce_size +
3759 (sw_context->cmd_bounce_size >> 1));
3760 }
3761
3762 vfree(sw_context->cmd_bounce);
3763 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3764
3765 if (sw_context->cmd_bounce == NULL) {
3766 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3767 sw_context->cmd_bounce_size = 0;
3768 return -ENOMEM;
3769 }
3770
3771 return 0;
3772}
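
/*
 * Worked example (illustrative only; the concrete values of
 * VMWGFX_CMD_BOUNCE_INIT_SIZE and PAGE_SIZE are assumed here to be 32 KiB
 * and 4 KiB): growing the bounce buffer for a 100000-byte submission
 * proceeds 32768 -> PAGE_ALIGN(49152) = 49152 -> PAGE_ALIGN(73728) = 73728
 * -> PAGE_ALIGN(110592) = 110592, at which point the loop above terminates
 * and a single vmalloc() of 110592 bytes is made.
 */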
3773
3774/**
3775 * vmw_execbuf_fence_commands - create and submit a command stream fence
3776 *
3777 * Creates a fence object and submits a command stream marker.
3778 * If this fails for some reason, we sync the fifo and return NULL.
3779 * It is then safe to fence buffers with a NULL pointer.
3780 *
3781 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3782 * userspace handle is created for the fence; otherwise no handle is created.
3783 */
3784
3785int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3786 struct vmw_private *dev_priv,
3787 struct vmw_fence_obj **p_fence,
3788 uint32_t *p_handle)
3789{
3790 uint32_t sequence;
3791 int ret;
3792 bool synced = false;
3793
3794 /* p_handle implies file_priv. */
3795 BUG_ON(p_handle != NULL && file_priv == NULL);
3796
3797 ret = vmw_fifo_send_fence(dev_priv, &sequence);
3798 if (unlikely(ret != 0)) {
3799 DRM_ERROR("Fence submission error. Syncing.\n");
3800 synced = true;
3801 }
3802
3803 if (p_handle != NULL)
3804 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3805 sequence, p_fence, p_handle);
3806 else
3807 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3808
3809 if (unlikely(ret != 0 && !synced)) {
3810 (void) vmw_fallback_wait(dev_priv, false, false,
3811 sequence, false,
3812 VMW_FENCE_WAIT_TIMEOUT);
3813 *p_fence = NULL;
3814 }
3815
3816 return 0;
3817}
3818
3819/**
3820 * vmw_execbuf_copy_fence_user - copy fence object information to
3821 * user-space.
3822 *
3823 * @dev_priv: Pointer to a vmw_private struct.
3824 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3825 * @ret: Return value from fence object creation.
3826 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3827 * which the information should be copied.
3828 * @fence: Pointer to the fence object.
3829 * @fence_handle: User-space fence handle.
3830 *
3831 * This function copies fence information to user-space. If copying fails,
3832 * the user-space struct drm_vmw_fence_rep::error member is hopefully
3833 * left untouched; if user-space preloaded it with -EFAULT, the failure
3834 * can then be detected.
3835 * Also if copying fails, user-space will be unable to signal the fence
3836 * object, so we wait for it immediately and then unreference the
3837 * user-space reference.
3838 */
3839void
3840vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3841 struct vmw_fpriv *vmw_fp,
3842 int ret,
3843 struct drm_vmw_fence_rep __user *user_fence_rep,
3844 struct vmw_fence_obj *fence,
3845 uint32_t fence_handle)
3846{
3847 struct drm_vmw_fence_rep fence_rep;
3848
3849 if (user_fence_rep == NULL)
3850 return;
3851
3852 memset(&fence_rep, 0, sizeof(fence_rep));
3853
3854 fence_rep.error = ret;
3855 if (ret == 0) {
3856 BUG_ON(fence == NULL);
3857
3858 fence_rep.handle = fence_handle;
3859 fence_rep.seqno = fence->base.seqno;
3860 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3861 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3862 }
3863
3864 /*
3865 * copy_to_user errors will be detected by user space not
3866 * seeing fence_rep::error filled in. Typically
3867 * user-space would have pre-set that member to -EFAULT.
3868 */
3869 ret = copy_to_user(user_fence_rep, &fence_rep,
3870 sizeof(fence_rep));
3871
3872 /*
3873 * User-space lost the fence object. We need to sync
3874 * and unreference the handle.
3875 */
3876 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3877 ttm_ref_object_base_unref(vmw_fp->tfile,
3878 fence_handle, TTM_REF_USAGE);
3879 DRM_ERROR("Fence copy error. Syncing.\n");
3880 (void) vmw_fence_obj_wait(fence, false, false,
3881 VMW_FENCE_WAIT_TIMEOUT);
3882 }
3883}
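
/*
 * Illustrative user-space pattern (a sketch only, not part of the driver):
 * pre-setting drm_vmw_fence_rep::error to -EFAULT before submitting an
 * execbuf ioctl, so that a failed copy_to_user() in the function above is
 * still detectable. The libdrm call, the argument wiring, and the names
 * fd, cmd_buf, cmd_bytes and handle_lost_fence() are assumptions made for
 * the example; a real caller also fills in the remaining argument fields.
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = { 0 };
 *	int ret;
 *
 *	arg.commands = (unsigned long) cmd_buf;
 *	arg.command_size = cmd_bytes;
 *	arg.fence_rep = (unsigned long) &rep;
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (ret == 0 && rep.error == -EFAULT)
 *		handle_lost_fence();
 */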
3884
3885/**
3886 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3887 * the fifo.
3888 *
3889 * @dev_priv: Pointer to a device private structure.
3890 * @kernel_commands: Pointer to the unpatched command batch.
3891 * @command_size: Size of the unpatched command batch.
3892 * @sw_context: Structure holding the relocation lists.
3893 *
3894 * Side effects: If this function returns 0, then the command batch
3895 * pointed to by @kernel_commands will have been modified.
3896 */
3897static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3898 void *kernel_commands,
3899 u32 command_size,
3900 struct vmw_sw_context *sw_context)
3901{
3902 void *cmd;
3903
3904 if (sw_context->dx_ctx_node)
3905 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3906 sw_context->dx_ctx_node->res->id);
3907 else
3908 cmd = vmw_fifo_reserve(dev_priv, command_size);
3909 if (!cmd) {
3910 DRM_ERROR("Failed reserving fifo space for commands.\n");
3911 return -ENOMEM;
3912 }
3913
3914 vmw_apply_relocations(sw_context);
3915 memcpy(cmd, kernel_commands, command_size);
3916 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3917 vmw_resource_relocations_free(&sw_context->res_relocations);
3918 vmw_fifo_commit(dev_priv, command_size);
3919
3920 return 0;
3921}
3922
3923/**
3924 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3925 * the command buffer manager.
3926 *
3927 * @dev_priv: Pointer to a device private structure.
3928 * @header: Opaque handle to the command buffer allocation.
3929 * @command_size: Size of the unpatched command batch.
3930 * @sw_context: Structure holding the relocation lists.
3931 *
3932 * Side effects: If this function returns 0, then the command buffer
3933 * represented by @header will have been modified.
3934 */
3935static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3936 struct vmw_cmdbuf_header *header,
3937 u32 command_size,
3938 struct vmw_sw_context *sw_context)
3939{
3940 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3941 SVGA3D_INVALID_ID);
3942 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3943 id, false, header);
3944
3945 vmw_apply_relocations(sw_context);
3946 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3947 vmw_resource_relocations_free(&sw_context->res_relocations);
3948 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3949
3950 return 0;
3951}
3952
3953/**
3954 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3955 * submission using a command buffer.
3956 *
3957 * @dev_priv: Pointer to a device private structure.
3958 * @user_commands: User-space pointer to the commands to be submitted.
3959 * @command_size: Size of the unpatched command batch.
3960 * @header: Out parameter returning the opaque pointer to the command buffer.
3961 *
3962 * This function checks whether we can use the command buffer manager for
3963 * submission and if so, creates a command buffer of suitable size and
3964 * copies the user data into that buffer.
3965 *
3966 * On successful return, the function returns a pointer to the data in the
3967 * command buffer and *@header is set to non-NULL.
3968 * If command buffers could not be used, the function will return the value
3969 * of @kernel_commands on function call. That value may be NULL. In that case,
3970 * the value of *@header will be set to NULL.
3971 * If an error is encountered, the function will return an error pointer.
3972 * If the function is interrupted by a signal while sleeping, it will return
3973 * -ERESTARTSYS cast to an error pointer.
3974 */
3975static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3976 void __user *user_commands,
3977 void *kernel_commands,
3978 u32 command_size,
3979 struct vmw_cmdbuf_header **header)
3980{
3981 size_t cmdbuf_size;
3982 int ret;
3983
3984 *header = NULL;
3985 if (command_size > SVGA_CB_MAX_SIZE) {
3986 DRM_ERROR("Command buffer is too large.\n");
3987 return ERR_PTR(-EINVAL);
3988 }
3989
3990 if (!dev_priv->cman || kernel_commands)
3991 return kernel_commands;
3992
3993 /* If possible, add a little space for fencing. */
3994 cmdbuf_size = command_size + 512;
3995 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3996 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3997 true, header);
3998 if (IS_ERR(kernel_commands))
3999 return kernel_commands;
4000
4001 ret = copy_from_user(kernel_commands, user_commands,
4002 command_size);
4003 if (ret) {
4004 DRM_ERROR("Failed copying commands.\n");
4005 vmw_cmdbuf_header_free(*header);
4006 *header = NULL;
4007 return ERR_PTR(-EFAULT);
4008 }
4009
4010 return kernel_commands;
4011}
4012
4013static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4014 struct vmw_sw_context *sw_context,
4015 uint32_t handle)
4016{
4017 struct vmw_resource_val_node *ctx_node;
4018 struct vmw_resource *res;
4019 int ret;
4020
4021 if (handle == SVGA3D_INVALID_ID)
4022 return 0;
4023
4024 ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
4025 handle, user_context_converter,
4026 &res);
4027 if (unlikely(ret != 0)) {
4028 DRM_ERROR("Could not find or user DX context 0x%08x.\n",
4029 (unsigned) handle);
4030 return ret;
4031 }
4032
4033 ret = vmw_resource_val_add(sw_context, res, &ctx_node);
4034 if (unlikely(ret != 0))
4035 goto out_err;
4036
4037 sw_context->dx_ctx_node = ctx_node;
4038 sw_context->man = vmw_context_res_man(res);
4039out_err:
4040 vmw_resource_unreference(&res);
4041 return ret;
4042}
4043
4044int vmw_execbuf_process(struct drm_file *file_priv,
4045 struct vmw_private *dev_priv,
4046 void __user *user_commands,
4047 void *kernel_commands,
4048 uint32_t command_size,
4049 uint64_t throttle_us,
4050 uint32_t dx_context_handle,
4051 struct drm_vmw_fence_rep __user *user_fence_rep,
4052 struct vmw_fence_obj **out_fence)
4053{
4054 struct vmw_sw_context *sw_context = &dev_priv->ctx;
4055 struct vmw_fence_obj *fence = NULL;
4056 struct vmw_resource *error_resource;
4057 struct list_head resource_list;
4058 struct vmw_cmdbuf_header *header;
4059 struct ww_acquire_ctx ticket;
4060 uint32_t handle;
4061 int ret;
4062
4063 if (throttle_us) {
4064 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
4065 throttle_us);
4066
4067 if (ret)
4068 return ret;
4069 }
4070
4071 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4072 kernel_commands, command_size,
4073 &header);
4074 if (IS_ERR(kernel_commands))
4075 return PTR_ERR(kernel_commands);
4076
4077 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4078 if (ret) {
4079 ret = -ERESTARTSYS;
4080 goto out_free_header;
4081 }
4082
4083 sw_context->kernel = false;
4084 if (kernel_commands == NULL) {
4085 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4086 if (unlikely(ret != 0))
4087 goto out_unlock;
4088
4089
4090 ret = copy_from_user(sw_context->cmd_bounce,
4091 user_commands, command_size);
4092
4093 if (unlikely(ret != 0)) {
4094 ret = -EFAULT;
4095 DRM_ERROR("Failed copying commands.\n");
4096 goto out_unlock;
4097 }
4098 kernel_commands = sw_context->cmd_bounce;
4099 } else if (!header)
4100 sw_context->kernel = true;
4101
4102 sw_context->fp = vmw_fpriv(file_priv);
4103 sw_context->cur_reloc = 0;
4104 sw_context->cur_val_buf = 0;
4105 INIT_LIST_HEAD(&sw_context->resource_list);
4106 INIT_LIST_HEAD(&sw_context->ctx_resource_list);
4107 sw_context->cur_query_bo = dev_priv->pinned_bo;
4108 sw_context->last_query_ctx = NULL;
4109 sw_context->needs_post_query_barrier = false;
4110 sw_context->dx_ctx_node = NULL;
4111 sw_context->dx_query_mob = NULL;
4112 sw_context->dx_query_ctx = NULL;
4113 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4114 INIT_LIST_HEAD(&sw_context->validate_nodes);
4115 INIT_LIST_HEAD(&sw_context->res_relocations);
4116 if (sw_context->staged_bindings)
4117 vmw_binding_state_reset(sw_context->staged_bindings);
4118
4119 if (!sw_context->res_ht_initialized) {
4120 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4121 if (unlikely(ret != 0))
4122 goto out_unlock;
4123 sw_context->res_ht_initialized = true;
4124 }
4125 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4126 INIT_LIST_HEAD(&resource_list);
4127 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4128 if (unlikely(ret != 0)) {
4129 list_splice_init(&sw_context->ctx_resource_list,
4130 &sw_context->resource_list);
4131 goto out_err_nores;
4132 }
4133
4134 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4135 command_size);
4136 /*
4137 * Merge the resource lists before checking the return status
4138 * from vmw_cmd_check_all so that all the open hashtabs will
4139 * be handled properly even if vmw_cmd_check_all fails.
4140 */
4141 list_splice_init(&sw_context->ctx_resource_list,
4142 &sw_context->resource_list);
4143
4144 if (unlikely(ret != 0))
4145 goto out_err_nores;
4146
4147 ret = vmw_resources_reserve(sw_context);
4148 if (unlikely(ret != 0))
4149 goto out_err_nores;
4150
4151 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4152 true, NULL);
4153 if (unlikely(ret != 0))
4154 goto out_err_nores;
4155
4156 ret = vmw_validate_buffers(dev_priv, sw_context);
4157 if (unlikely(ret != 0))
4158 goto out_err;
4159
4160 ret = vmw_resources_validate(sw_context);
4161 if (unlikely(ret != 0))
4162 goto out_err;
4163
4164 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4165 if (unlikely(ret != 0)) {
4166 ret = -ERESTARTSYS;
4167 goto out_err;
4168 }
4169
4170 if (dev_priv->has_mob) {
4171 ret = vmw_rebind_contexts(sw_context);
4172 if (unlikely(ret != 0))
4173 goto out_unlock_binding;
4174 }
4175
4176 if (!header) {
4177 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4178 command_size, sw_context);
4179 } else {
4180 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4181 sw_context);
4182 header = NULL;
4183 }
4184 mutex_unlock(&dev_priv->binding_mutex);
4185 if (ret)
4186 goto out_err;
4187
4188 vmw_query_bo_switch_commit(dev_priv, sw_context);
4189 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4190 &fence,
4191 (user_fence_rep) ? &handle : NULL);
4192 /*
4193 * This error is harmless, because if fence submission fails,
4194 * vmw_fifo_send_fence will sync. The error will be propagated to
4195 * user-space in @fence_rep.
4196 */
4197
4198 if (ret != 0)
4199 DRM_ERROR("Fence submission error. Syncing.\n");
4200
4201 vmw_resources_unreserve(sw_context, false);
4202
4203 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4204 (void *) fence);
4205
4206 if (unlikely(dev_priv->pinned_bo != NULL &&
4207 !dev_priv->query_cid_valid))
4208 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4209
4210 vmw_clear_validations(sw_context);
4211 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4212 user_fence_rep, fence, handle);
4213
4214 /* Don't unreference when handing fence out */
4215 if (unlikely(out_fence != NULL)) {
4216 *out_fence = fence;
4217 fence = NULL;
4218 } else if (likely(fence != NULL)) {
4219 vmw_fence_obj_unreference(&fence);
4220 }
4221
4222 list_splice_init(&sw_context->resource_list, &resource_list);
4223 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4224 mutex_unlock(&dev_priv->cmdbuf_mutex);
4225
4226 /*
4227 * Unreference resources outside of the cmdbuf_mutex to
4228 * avoid deadlocks in resource destruction paths.
4229 */
4230 vmw_resource_list_unreference(sw_context, &resource_list);
4231
4232 return 0;
4233
4234out_unlock_binding:
4235 mutex_unlock(&dev_priv->binding_mutex);
4236out_err:
4237 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4238out_err_nores:
4239 vmw_resources_unreserve(sw_context, true);
4240 vmw_resource_relocations_free(&sw_context->res_relocations);
4241 vmw_free_relocations(sw_context);
4242 vmw_clear_validations(sw_context);
4243 if (unlikely(dev_priv->pinned_bo != NULL &&
4244 !dev_priv->query_cid_valid))
4245 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00004246out_unlock:
Thomas Hellstromc0951b72012-11-20 12:19:35 +00004247 list_splice_init(&sw_context->resource_list, &resource_list);
4248 error_resource = sw_context->error_resource;
4249 sw_context->error_resource = NULL;
Thomas Hellstrom18e4a462014-06-09 12:39:22 +02004250 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
Jakob Bornecrantzfb1d9732009-12-10 00:19:58 +00004251 mutex_unlock(&dev_priv->cmdbuf_mutex);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00004252
4253 /*
4254 * Unreference resources outside of the cmdbuf_mutex to
4255 * avoid deadlocks in resource destruction paths.
4256 */
Thomas Hellstromd80efd52015-08-10 10:39:35 -07004257 vmw_resource_list_unreference(sw_context, &resource_list);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00004258 if (unlikely(error_resource != NULL))
4259 vmw_resource_unreference(&error_resource);
Thomas Hellstrom3eab3d92015-06-25 11:57:56 -07004260out_free_header:
4261 if (header)
4262 vmw_cmdbuf_header_free(header);
Thomas Hellstromc0951b72012-11-20 12:19:35 +00004263
Thomas Hellstrom922ade02011-10-04 20:13:17 +02004264 return ret;
4265}
4266
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
        DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

        (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
        if (dev_priv->dummy_query_bo_pinned) {
                vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
                dev_priv->dummy_query_bo_pinned = false;
        }
}


/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo.
 *
 * This function should be used to unpin the pinned query bo, or as a
 * query barrier when we need to make sure that all queries have finished
 * before the next fifo command. (For example on hardware context
 * destruction, where the hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but it attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                     struct vmw_fence_obj *fence)
{
        int ret = 0;
        struct list_head validate_list;
        struct ttm_validate_buffer pinned_val, query_val;
        struct vmw_fence_obj *lfence = NULL;
        struct ww_acquire_ctx ticket;

        if (dev_priv->pinned_bo == NULL)
                goto out_unlock;

        INIT_LIST_HEAD(&validate_list);

        pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
        pinned_val.shared = false;
        list_add_tail(&pinned_val.head, &validate_list);

        query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
        query_val.shared = false;
        list_add_tail(&query_val.head, &validate_list);

        ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
                                     false, NULL);
        if (unlikely(ret != 0)) {
                vmw_execbuf_unpin_panic(dev_priv);
                goto out_no_reserve;
        }

        if (dev_priv->query_cid_valid) {
                BUG_ON(fence != NULL);
                ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
                if (unlikely(ret != 0)) {
                        vmw_execbuf_unpin_panic(dev_priv);
                        goto out_no_emit;
                }
                dev_priv->query_cid_valid = false;
        }

        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
        if (dev_priv->dummy_query_bo_pinned) {
                vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
                dev_priv->dummy_query_bo_pinned = false;
        }
        if (fence == NULL) {
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
                                                  NULL);
                fence = lfence;
        }
        ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
        if (lfence != NULL)
                vmw_fence_obj_unreference(&lfence);

        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
out_unlock:
        return;

out_no_emit:
        ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
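
/*
 * A minimal usage sketch (not taken from this file; names are illustrative):
 * a caller that already holds dev_priv->cmdbuf_mutex and has emitted its own
 * query barrier passes the fence signaled after that barrier:
 *
 *      mutex_lock(&dev_priv->cmdbuf_mutex);
 *      ... emit query barrier, obtain barrier_fence ...
 *      __vmw_execbuf_release_pinned_bo(dev_priv, barrier_fence);
 *      mutex_unlock(&dev_priv->cmdbuf_mutex);
 *
 * Passing a NULL fence instead makes the function emit its own dummy query
 * and fence, which is what the wrapper below does.
 */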

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a
 * query barrier when we need to make sure that all queries have finished
 * before the next fifo command. (For example on hardware context
 * destruction, where the hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but it attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
        mutex_lock(&dev_priv->cmdbuf_mutex);
        if (dev_priv->query_cid_valid)
                __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
}
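
/*
 * Hedged example (not from the driver): a teardown path that is about to
 * destroy a hardware context could use the wrapper above as a query barrier,
 * so that no unfinished queries are left against the pinned buffer:
 *
 *      vmw_execbuf_release_pinned_bo(dev_priv);
 *      ... destroy the hardware context ...
 */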

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
                      struct drm_file *file_priv, size_t size)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_execbuf_arg arg;
        int ret;
        static const size_t copy_offset[] = {
                offsetof(struct drm_vmw_execbuf_arg, context_handle),
                sizeof(struct drm_vmw_execbuf_arg)};

        if (unlikely(size < copy_offset[0])) {
                DRM_ERROR("Invalid command size, ioctl %d\n",
                          DRM_VMW_EXECBUF);
                return -EINVAL;
        }

        if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
                return -EFAULT;

        /*
         * Extend the ioctl argument while maintaining backwards
         * compatibility: we take different code paths depending on the
         * value of arg.version.
         */

        if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
                     arg.version == 0)) {
                DRM_ERROR("Incorrect execbuf version.\n");
                return -EINVAL;
        }

        if (arg.version > 1 &&
            copy_from_user(&arg.context_handle,
                           (void __user *) (data + copy_offset[0]),
                           copy_offset[arg.version - 1] -
                           copy_offset[0]) != 0)
                return -EFAULT;

        switch (arg.version) {
        case 1:
                arg.context_handle = (uint32_t) -1;
                break;
        case 2:
                if (arg.pad64 != 0) {
                        DRM_ERROR("Unused IOCTL data not set to zero.\n");
                        return -EINVAL;
                }
                break;
        default:
                break;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_execbuf_process(file_priv, dev_priv,
                                  (void __user *)(unsigned long)arg.commands,
                                  NULL, arg.command_size, arg.throttle_us,
                                  arg.context_handle,
                                  (void __user *)(unsigned long)arg.fence_rep,
                                  NULL);
        ttm_read_unlock(&dev_priv->reservation_sem);
        if (unlikely(ret != 0))
                return ret;

        vmw_kms_cursor_post_execbuf(dev_priv);

        return 0;
}
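
/*
 * Hedged user-space sketch, assuming libdrm's drmCommandWrite() and only the
 * argument fields referenced above; it is not a verbatim libdrm example. A
 * version-2 submission fills struct drm_vmw_execbuf_arg and leaves the unused
 * pad64 field zeroed, as the checks in vmw_execbuf_ioctl() require:
 *
 *      struct drm_vmw_execbuf_arg arg = { 0 };
 *      struct drm_vmw_fence_rep fence_rep;
 *
 *      arg.version = 2;
 *      arg.context_handle = ctx_handle;      (hypothetical context handle)
 *      arg.commands = (unsigned long) cmds;  (user-space command buffer)
 *      arg.command_size = cmd_size;
 *      arg.throttle_us = 0;
 *      arg.fence_rep = (unsigned long) &fence_rep;
 *      ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */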