/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

/* XXX: This isn't a real hardware flag, but just a hack for the kernel to
 * know about primary surfaces. Find a better way to accomplish this.
 */
#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

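/**
 * vmw_resource_release - kref release callback for a struct vmw_resource.
 *
 * Called via kref_put() with the device resource_lock write-held. Removes
 * the resource id from its idr, temporarily drops the lock around the
 * optional hw_destroy() and res_free() callbacks, and re-takes it so that
 * the caller (vmw_resource_unreference) can unlock again.
 */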
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

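/**
 * vmw_resource_init - initialize a struct vmw_resource and give it an id.
 *
 * Sets up the kref and callbacks and allocates an id for the resource in
 * the given idr, retrying while idr_get_new_above() returns -EAGAIN. The
 * resource is left unavailable (res->avail == false) until
 * vmw_resource_activate() is called.
 */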
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

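/**
 * vmw_resource_lookup - look up a resource by id in an idr.
 *
 * Takes the resource_lock for reading and returns a referenced pointer to
 * the resource only if it exists and has been activated (res->avail).
 * Returns NULL otherwise. The caller must drop the reference with
 * vmw_resource_unreference().
 */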
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

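/**
 * vmw_context_init - register a context resource and define it on the device.
 *
 * Initializes the resource in the context idr and emits an
 * SVGA_3D_CMD_CONTEXT_DEFINE command through the FIFO. On failure the
 * resource is freed through res_free (or kfree if res_free is NULL).
 */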
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

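/**
 * vmw_context_destroy_ioctl - ioctl that drops a user-space context reference.
 *
 * Looks up the context by id, checks that the caller is allowed to destroy
 * it (owner or shareable object), and removes the TTM_REF_USAGE reference,
 * which in turn triggers destruction when the last reference goes away.
 */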
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

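/**
 * vmw_hw_surface_destroy - destroy a surface on the device.
 *
 * Emits an SVGA_3D_CMD_SURFACE_DESTROY command for the surface id through
 * the FIFO. Used as the hw_destroy callback for surface resources.
 */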
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

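/**
 * vmw_surface_init - register a surface resource and define it on the device.
 *
 * Allocates an id for the surface in the surface idr and emits an
 * SVGA_3D_CMD_SURFACE_DEFINE command, followed by one SVGA3dSize per
 * mip level, through the FIFO. On success the resource is activated with
 * vmw_hw_surface_destroy as its hw_destroy callback.
 */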
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

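/**
 * vmw_user_surface_lookup_handle - look up a surface by user-space handle.
 *
 * Resolves the handle to a ttm base object, verifies that it is a surface
 * owned by this driver, and returns the surface with an extra resource
 * reference held. Returns -EINVAL if the handle does not name a valid,
 * available surface.
 */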
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

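/**
 * vmw_surface_define_ioctl - ioctl that creates a user-space surface.
 *
 * Copies the per-mip-level sizes from user space, allocates a cursor
 * snooper image for 64x64 A8R8G8B8 scanout surfaces, defines the surface
 * on the device and registers it as a ttm base object, returning the
 * surface id in the reply.
 */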
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0))
		goto out_err1;

	if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
		/* We should not send this flag down to hardware, since
		 * it is not an official one.
		 */
		srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
		srf->scanout = true;
	} else {
		srf->scanout = false;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

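/**
 * vmw_surface_reference_ioctl - ioctl that adds a reference to a surface.
 *
 * Looks up the surface by id, adds a TTM_REF_USAGE reference for the
 * calling file and copies the surface description (flags, format, mip
 * levels and sizes) back to user space.
 */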
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0))
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

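/**
 * vmw_dmabuf_init - initialize a vmw_dma_buffer as a TTM buffer object.
 *
 * Accounts the object size against the TTM memory global, clears the
 * embedded structure and hands it to ttm_bo_init() with the given
 * placement. bo_free is used as the destroy callback and must be supplied.
 */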
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

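/**
 * vmw_dmabuf_alloc_ioctl - ioctl that allocates a user-space dma buffer.
 *
 * Allocates a vmw_user_dma_buffer, initializes it with the requested size
 * in the vram/system placement and registers it as a ttm base object.
 * The handle, map offset and current GMR id are returned in the reply.
 */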
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
	} else {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
	}
	ttm_bo_unref(&tmp);

	ttm_read_unlock(&vmaster->lock);

	return 0;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

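/**
 * vmw_dmabuf_validate_node - assign a validate-list slot to a buffer object.
 *
 * Returns the buffer's existing slot if it is already on the validate
 * list, otherwise records and returns the supplied slot index.
 */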
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

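/**
 * vmw_stream_init - register a stream resource and claim an overlay stream.
 *
 * Initializes the resource in the stream idr and claims a stream id from
 * the overlay code. On failure the stream is freed through res_free (or
 * kfree if res_free is NULL).
 */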
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

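/**
 * vmw_user_stream_lookup - look up a user stream by id for a given file.
 *
 * Returns the referenced stream resource and replaces *inout_id with the
 * underlying overlay stream id. Only streams created by the calling file
 * are accepted.
 */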
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}