/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
        spinlock_t cotable_lock;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */

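/**
 * vmw_context_cotables_unref - Drop the context's references on its cotables
 *
 * @uctx: Pointer to the user context.
 *
 * Each cotable pointer is cleared under the cotable lock before its
 * reference is dropped, so that readers racing with destruction see
 * either a valid pointer or NULL.
 */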
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;

        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                /* Non-DX contexts leave these NULL; unreferencing a NULL
                 * resource would oops. */
                if (res)
                        vmw_resource_unreference(&res);
        }
}

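/**
 * vmw_hw_context_destroy - Destroy the device resources backing a context
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts, kills the context's bindings and calls
 * the resource type's destroy callback with the command buffer and binding
 * mutexes held. For legacy contexts, emits a SVGA_3D_CMD_CONTEXT_DESTROY
 * command to the device FIFO.
 */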
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}

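/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @dx: Whether to initialize as a DX context rather than a GB context.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor called when the last reference is dropped, or
 * NULL to use kfree().
 *
 * Sets up the backup buffer size, the command buffer resource manager,
 * the binding state and, for DX contexts, the cotables. On error, the
 * resource is freed through @res_free or kfree().
 */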
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                            SVGA3D_CONTEXT_DATA_SIZE);
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (unlikely(IS_ERR(uctx->man))) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (unlikely(uctx->cotables[i] == NULL)) {
                                ret = -ENOMEM;
                                goto out_cotables;
                        }
                }
        }

        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_cotables:
        vmw_context_cotables_unref(uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}

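/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor called when the last reference is dropped, or
 * NULL to use kfree().
 * @dx: Whether the context should be created as a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices. Otherwise
 * allocates a legacy hardware context id and emits a
 * SVGA_3D_CMD_CONTEXT_DEFINE command to the device FIFO.
 */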
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}

/*
 * GB context.
 */

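/**
 * vmw_gb_context_create - Make a guest-backed context known to the device
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a context id and emits a SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. A no-op if the context already has an id.
 */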
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

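/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup buffer
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the MOB-placed backup buffer.
 *
 * Emits a SVGA_3D_CMD_BIND_GB_CONTEXT command pointing the device at the
 * backup MOB and indicating whether its contents are still valid.
 */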
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }
        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

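/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup
 * buffer
 *
 * @res: Pointer to the context resource.
 * @readback: Whether the device should save the context state to the
 * backup buffer before unbinding.
 * @val_buf: Validation buffer holding the backup buffer.
 *
 * Scrubs the context's bindings, detaches the backup MOB and fences the
 * backup buffer so it is not reused before the device is done with it.
 */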
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

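/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device
 *
 * @res: Pointer to the context resource.
 *
 * Emits a SVGA_3D_CMD_DESTROY_GB_CONTEXT command, invalidates the cached
 * query context id if it matches, and releases the context id.
 */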
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */

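/**
 * vmw_dx_context_create - Make a DX context known to the device
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a context id and emits a SVGA_3D_CMD_DX_DEFINE_CONTEXT
 * command. A no-op if the context already has an id.
 */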
static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

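/**
 * vmw_dx_context_bind - Bind a DX context to its backup buffer
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the MOB-placed backup buffer.
 *
 * Emits a SVGA_3D_CMD_DX_BIND_CONTEXT command pointing the device at the
 * backup MOB and indicating whether its contents are still valid.
 */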
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);
                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}

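/**
 * vmw_dx_context_unbind - Unbind a DX context from its backup buffer
 *
 * @res: Pointer to the context resource.
 * @readback: Whether the device should save the context state to the
 * backup buffer before unbinding.
 * @val_buf: Validation buffer holding the backup buffer.
 *
 * Scrubs the context's bindings and cotables, detaches the backup MOB
 * and fences the backup buffer so it is not reused before the device is
 * done with it.
 */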
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

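/**
 * vmw_dx_context_destroy - Destroy a DX context on the device
 *
 * @res: Pointer to the context resource.
 *
 * Emits a SVGA_3D_CMD_DX_DESTROY_CONTEXT command, invalidates the cached
 * query context id if it matches, and releases the context id.
 */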
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

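/**
 * vmw_user_context_free - Free a user context
 *
 * @res: Pointer to the context resource embedded in a struct
 * vmw_user_context.
 *
 * Frees the binding state and the base object, and returns the memory
 * accounted against the context to the global memory accounting.
 */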
static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);
        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Called when user space has no more
 * references on the base object
 *
 * @p_base: Pointer to the base object pointer, which is cleared.
 *
 * Releases the base object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
                container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

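/**
 * vmw_context_define - Create a context resource on behalf of user space
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg through which the new
 * context id is returned.
 * @file_priv: Pointer to the calling file private.
 * @dx: Whether to create a DX context.
 *
 * Accounts the memory used by the context, initializes the context
 * resource and sets up the user-space visible base object backing it.
 */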
static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (!dev_priv->has_dx && dx) {
                DRM_ERROR("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of contexts anyway.
         */

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
                  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(ctx == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
        struct drm_vmw_context_arg *rep = &arg->rep;

        switch (arg->req) {
        case drm_vmw_context_legacy:
                return vmw_context_define(dev, rep, file_priv, false);
        case drm_vmw_context_dx:
                return vmw_context_define(dev, rep, file_priv, true);
        default:
                break;
        }
        return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}

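/**
 * vmw_context_res_man - Return the context's command buffer resource manager
 *
 * @ctx: The context resource
 */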
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}

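/**
 * vmw_context_cotable - Return a referenced cotable of a context
 *
 * @ctx: The context resource
 * @cotable_type: The cotable type
 *
 * Returns a reference to the requested cotable, or ERR_PTR(-EINVAL) if
 * @cotable_type is out of range.
 */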
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        if (cotable_type >= SVGA_COTABLE_DX10_MAX)
                return ERR_PTR(-EINVAL);

        return vmw_resource_reference
                (container_of(ctx, struct vmw_user_context, res)->
                 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}