/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

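/**
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: The TTM base object making the resource visible to user space.
 * @res: The embedded struct vmw_resource tracking the hardware context.
 */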
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);

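/*
 * Per-object accounting size of a struct vmw_user_context, computed once
 * in vmw_context_define_ioctl() with a 128-byte allowance for idr overhead.
 */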
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};
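
/*
 * Note the two resource-function tables above: legacy contexts need no
 * guest backup storage and are never evicted, while guest-backed contexts
 * keep their state in a MOB (memory object) backup buffer and may be
 * evicted and later rebound during validation.
 */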

/**
 * Context management:
 */

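/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed contexts this takes the command buffer mutex, calls
 * vmw_gb_context_destroy() and, if needed, releases the pinned query
 * buffer. For legacy contexts it emits an SVGA_3D_CMD_CONTEXT_DESTROY
 * command through the FIFO.
 */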
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		(void) vmw_gb_context_destroy(res);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

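/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on failure, or NULL to kfree() @res.
 *
 * Sets up @res as a guest-backed context with a backup buffer of
 * SVGA3D_CONTEXT_DATA_SIZE bytes and activates it.
 */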
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

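/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on failure, or NULL to kfree() @res.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed (MOB-capable)
 * devices. Otherwise allocates a legacy context id and emits an
 * SVGA_3D_CMD_CONTEXT_DEFINE command through the FIFO.
 */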
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

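/**
 * vmw_context_alloc - Allocate and initialize a kernel-side context.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns a newly allocated, refcounted context resource, or NULL on
 * failure. A minimal caller sketch (the surrounding code is an assumed
 * example, not taken from this file):
 *
 *	struct vmw_resource *res = vmw_context_alloc(dev_priv);
 *
 *	if (unlikely(res == NULL))
 *		return -ENOMEM;
 *
 *	... use res->id in commands submitted to the device ...
 *
 *	vmw_resource_unreference(&res);
 */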
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}

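/**
 * vmw_gb_context_create - Create the device side of a guest-backed context.
 *
 * @res: The context resource.
 *
 * Allocates a context id and emits an SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. Returns 0 if the context already has an id or was successfully
 * created.
 */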
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

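/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB.
 *
 * @res: The context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_CONTEXT command pointing the context at
 * the MOB backing it; validContents tells the device whether the backup
 * buffer holds previously read-back state.
 */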
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

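/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB.
 *
 * @res: The context resource.
 * @readback: Whether to read the context state back into the backup buffer
 * before unbinding.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Optionally emits an SVGA_3D_CMD_READBACK_GB_CONTEXT command, then binds
 * the context to SVGA3D_INVALID_ID and fences the backup buffer so it is
 * not reused before the device is done with it.
 */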
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

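/**
 * vmw_gb_context_destroy - Destroy the device side of a guest-backed
 * context.
 *
 * @res: The context resource.
 *
 * Emits an SVGA_3D_CMD_DESTROY_GB_CONTEXT command, invalidates the cached
 * query context id if it matches, and releases the context id.
 */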
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - TTM base-object release hook.
 *
 * @p_base: Pointer to the base object to release; cleared on return.
 *
 * Called when user space holds no more references on the base object.
 * Releases the base object's reference on the context resource.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

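/**
 * vmw_context_destroy_ioctl - Ioctl dropping a user-space context reference.
 *
 * @dev: Pointer to the DRM device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg naming the context.
 * @file_priv: The calling file's private data.
 *
 * Drops the TTM_REF_USAGE reference on the base object identified by
 * arg->cid; the last reference triggers vmw_user_context_base_release().
 */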
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

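/**
 * vmw_context_define_ioctl - Ioctl creating a user-space visible context.
 *
 * @dev: Pointer to the DRM device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg receiving the
 * new context id.
 * @file_priv: The calling file's private data.
 *
 * Accounts for, allocates and initializes a struct vmw_user_context, makes
 * it visible to user space as a TTM base object and returns its handle in
 * arg->cid.
 */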
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}