1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_drm.h"
30#include "ttm/ttm_object.h"
31#include "ttm/ttm_placement.h"
32#include "drmP.h"
33
34struct vmw_user_context {
35 struct ttm_base_object base;
36 struct vmw_resource res;
37};
38
39struct vmw_user_surface {
40 struct ttm_base_object base;
41 struct vmw_surface srf;
42};
43
44struct vmw_user_dma_buffer {
45 struct ttm_base_object base;
46 struct vmw_dma_buffer dma;
47};
48
49struct vmw_bo_user_rep {
50 uint32_t handle;
51 uint64_t map_handle;
52};
53
54struct vmw_stream {
55 struct vmw_resource res;
56 uint32_t stream_id;
57};
58
59struct vmw_user_stream {
60 struct ttm_base_object base;
61 struct vmw_stream stream;
62};
63
64struct vmw_surface_offset {
65 uint32_t face;
66 uint32_t mip;
67 uint32_t bo_offset;
68};
69
70static inline struct vmw_dma_buffer *
71vmw_dma_buffer(struct ttm_buffer_object *bo)
72{
73 return container_of(bo, struct vmw_dma_buffer, base);
74}
75
76static inline struct vmw_user_dma_buffer *
77vmw_user_dma_buffer(struct ttm_buffer_object *bo)
78{
79 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
80 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
81}
82
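/**
 * vmw_resource_reference - take a reference on a resource.
 *
 * @res: Pointer to the resource.
 *
 * Increments the resource refcount and returns @res, which is convenient
 * for reference-and-assign call sites.
 */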
83struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
84{
85 kref_get(&res->kref);
86 return res;
87}
88
89
90/**
91 * vmw_resource_release_id - release a resource id to the id manager.
92 *
93 * @res: Pointer to the resource.
94 *
95 * Release the resource id to the resource id manager and set it to -1
96 */
97static void vmw_resource_release_id(struct vmw_resource *res)
98{
99 struct vmw_private *dev_priv = res->dev_priv;
100
101 write_lock(&dev_priv->resource_lock);
102 if (res->id != -1)
103 idr_remove(res->idr, res->id);
104 res->id = -1;
105 write_unlock(&dev_priv->resource_lock);
106}
107
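/**
 * vmw_resource_release - kref release callback for a resource.
 *
 * @kref: The embedded kref of the resource being destroyed.
 *
 * Called with the resource_lock write-held from vmw_resource_unreference().
 * Marks the resource unavailable, drops the lock around the hw_destroy and
 * res_free callbacks, then retakes it and removes the id from the idr.
 */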
108static void vmw_resource_release(struct kref *kref)
109{
110 struct vmw_resource *res =
111 container_of(kref, struct vmw_resource, kref);
112 struct vmw_private *dev_priv = res->dev_priv;
113 int id = res->id;
114 struct idr *idr = res->idr;
115
116 res->avail = false;
117 if (res->remove_from_lists != NULL)
118 res->remove_from_lists(res);
119 write_unlock(&dev_priv->resource_lock);
120
121 if (likely(res->hw_destroy != NULL))
122 res->hw_destroy(res);
123
124 if (res->res_free != NULL)
125 res->res_free(res);
126 else
127 kfree(res);
128
129 write_lock(&dev_priv->resource_lock);
130
131 if (id != -1)
132 idr_remove(idr, id);
133}
134
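/**
 * vmw_resource_unreference - drop a reference on a resource.
 *
 * @p_res: Pointer to the resource pointer; cleared before the kref_put so
 * the caller cannot reuse a potentially freed resource.
 */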
135void vmw_resource_unreference(struct vmw_resource **p_res)
136{
137 struct vmw_resource *res = *p_res;
138 struct vmw_private *dev_priv = res->dev_priv;
139
140 *p_res = NULL;
141 write_lock(&dev_priv->resource_lock);
142 kref_put(&res->kref, vmw_resource_release);
143 write_unlock(&dev_priv->resource_lock);
144}
145
146
147/**
148 * vmw_resource_alloc_id - allocate a resource id from the id manager.
149 *
150 * @dev_priv: Pointer to the device private structure.
151 * @res: Pointer to the resource.
152 *
153 * Allocate the lowest free resource id from the id manager, and set
154 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
155 */
156static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
157 struct vmw_resource *res)
158{
159 int ret;
160
161 BUG_ON(res->id != -1);
162
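        /*
         * idr_pre_get() preallocates memory outside the lock; if
         * idr_get_new_above() still returns -EAGAIN under the lock,
         * preallocate again and retry.
         */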
163 do {
164 if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
165 return -ENOMEM;
166
167 write_lock(&dev_priv->resource_lock);
168 ret = idr_get_new_above(res->idr, res, 1, &res->id);
169 write_unlock(&dev_priv->resource_lock);
170
171 } while (ret == -EAGAIN);
172
173 return ret;
174}
175
176
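/**
 * vmw_resource_init - initialize a struct vmw_resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The resource to initialize.
 * @idr: The idr the resource will be registered in.
 * @obj_type: TTM object type of the resource.
 * @delay_id: If true, no resource id is allocated here; the caller is
 * expected to allocate one later, for example at validation time.
 * @res_free: Destructor, or NULL to simply kfree() the resource.
 * @remove_from_lists: Optional callback invoked early in the release path.
 */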
177static int vmw_resource_init(struct vmw_private *dev_priv,
178 struct vmw_resource *res,
179 struct idr *idr,
180 enum ttm_object_type obj_type,
181 bool delay_id,
182 void (*res_free) (struct vmw_resource *res),
183 void (*remove_from_lists)
184 (struct vmw_resource *res))
185{
186 kref_init(&res->kref);
187 res->hw_destroy = NULL;
188 res->res_free = res_free;
189 res->remove_from_lists = remove_from_lists;
190 res->res_type = obj_type;
191 res->idr = idr;
192 res->avail = false;
193 res->dev_priv = dev_priv;
194 INIT_LIST_HEAD(&res->query_head);
195 INIT_LIST_HEAD(&res->validate_head);
196 res->id = -1;
197 if (delay_id)
198 return 0;
199 else
200 return vmw_resource_alloc_id(dev_priv, res);
201}
202
203/**
204 * vmw_resource_activate
205 *
206 * @res: Pointer to the newly created resource
207 * @hw_destroy: Destroy function. NULL if none.
208 *
209 * Activate a resource after the hardware has been made aware of it.
210 * Set the destroy function to @hw_destroy. Typically this destroys the
211 * hardware resources associated with the resource.
212 * Activate basically means that the function vmw_resource_lookup will
213 * find it.
214 */
215
216static void vmw_resource_activate(struct vmw_resource *res,
217 void (*hw_destroy) (struct vmw_resource *))
218{
219 struct vmw_private *dev_priv = res->dev_priv;
220
221 write_lock(&dev_priv->resource_lock);
222 res->avail = true;
223 res->hw_destroy = hw_destroy;
224 write_unlock(&dev_priv->resource_lock);
225}
226
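/**
 * vmw_resource_lookup - look up a resource by id and take a reference.
 *
 * @dev_priv: Pointer to the device private structure.
 * @idr: The idr to search.
 * @id: The resource id.
 *
 * Returns a referenced resource, or NULL if the id is unknown or the
 * resource has not been activated yet.
 */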
227struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
228 struct idr *idr, int id)
229{
230 struct vmw_resource *res;
231
232 read_lock(&dev_priv->resource_lock);
233 res = idr_find(idr, id);
234 if (res && res->avail)
235 kref_get(&res->kref);
236 else
237 res = NULL;
238 read_unlock(&dev_priv->resource_lock);
239
240 if (unlikely(res == NULL))
241 return NULL;
242
243 return res;
244}
245
246/**
247 * Context management:
248 */
249
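/**
 * vmw_hw_context_destroy - destroy the device context backing a resource.
 *
 * @res: Pointer to the context resource.
 *
 * Asks the execbuf code to release any buffer pinned on behalf of this
 * context id, emits a SVGA_3D_CMD_CONTEXT_DESTROY command and drops the
 * 3D resource reference count.
 */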
250static void vmw_hw_context_destroy(struct vmw_resource *res)
251{
252
253 struct vmw_private *dev_priv = res->dev_priv;
254 struct {
255 SVGA3dCmdHeader header;
256 SVGA3dCmdDestroyContext body;
257 } *cmd;
258
259
260 vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
261
262 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
263 if (unlikely(cmd == NULL)) {
264 DRM_ERROR("Failed reserving FIFO space for context "
265 "destruction.\n");
266 return;
267 }
268
269 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
270 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
271 cmd->body.cid = cpu_to_le32(res->id);
272
273 vmw_fifo_commit(dev_priv, sizeof(*cmd));
274 vmw_3d_resource_dec(dev_priv, false);
275}
276
277static int vmw_context_init(struct vmw_private *dev_priv,
278 struct vmw_resource *res,
279 void (*res_free) (struct vmw_resource *res))
280{
281 int ret;
282
283 struct {
284 SVGA3dCmdHeader header;
285 SVGA3dCmdDefineContext body;
286 } *cmd;
287
288 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
289 VMW_RES_CONTEXT, false, res_free, NULL);
290
291 if (unlikely(ret != 0)) {
292 DRM_ERROR("Failed to allocate a resource id.\n");
293 goto out_early;
294 }
295
296 if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
297 DRM_ERROR("Out of hw context ids.\n");
298 vmw_resource_unreference(&res);
299 return -ENOMEM;
300 }
301
302 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
303 if (unlikely(cmd == NULL)) {
304 DRM_ERROR("Fifo reserve failed.\n");
305 vmw_resource_unreference(&res);
306 return -ENOMEM;
307 }
308
309 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
310 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
311 cmd->body.cid = cpu_to_le32(res->id);
312
313 vmw_fifo_commit(dev_priv, sizeof(*cmd));
314 (void) vmw_3d_resource_inc(dev_priv, false);
315 vmw_resource_activate(res, vmw_hw_context_destroy);
316 return 0;
317
318out_early:
319 if (res_free == NULL)
320 kfree(res);
321 else
322 res_free(res);
323 return ret;
324}
325
326struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
327{
328 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
329 int ret;
330
331 if (unlikely(res == NULL))
332 return NULL;
333
334 ret = vmw_context_init(dev_priv, res, NULL);
335 return (ret == 0) ? res : NULL;
336}
337
338/**
339 * User-space context management:
340 */
341
342static void vmw_user_context_free(struct vmw_resource *res)
343{
344 struct vmw_user_context *ctx =
345 container_of(res, struct vmw_user_context, res);
346
347 kfree(ctx);
348}
349
350/**
351 * This function is called when user space has no more references on the
352 * base object. It releases the base-object's reference on the resource object.
353 */
354
355static void vmw_user_context_base_release(struct ttm_base_object **p_base)
356{
357 struct ttm_base_object *base = *p_base;
358 struct vmw_user_context *ctx =
359 container_of(base, struct vmw_user_context, base);
360 struct vmw_resource *res = &ctx->res;
361
362 *p_base = NULL;
363 vmw_resource_unreference(&res);
364}
365
366int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
367 struct drm_file *file_priv)
368{
369 struct vmw_private *dev_priv = vmw_priv(dev);
370 struct vmw_resource *res;
371 struct vmw_user_context *ctx;
372 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
373 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
374 int ret = 0;
375
376 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
377 if (unlikely(res == NULL))
378 return -EINVAL;
379
380 if (res->res_free != &vmw_user_context_free) {
381 ret = -EINVAL;
382 goto out;
383 }
384
385 ctx = container_of(res, struct vmw_user_context, res);
386 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
387 ret = -EPERM;
388 goto out;
389 }
390
391 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
392out:
393 vmw_resource_unreference(&res);
394 return ret;
395}
396
397int vmw_context_define_ioctl(struct drm_device *dev, void *data,
398 struct drm_file *file_priv)
399{
400 struct vmw_private *dev_priv = vmw_priv(dev);
401 struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
402 struct vmw_resource *res;
403 struct vmw_resource *tmp;
404 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
405 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
406 int ret;
407
408 if (unlikely(ctx == NULL))
409 return -ENOMEM;
410
411 res = &ctx->res;
412 ctx->base.shareable = false;
413 ctx->base.tfile = NULL;
414
415 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
416 if (unlikely(ret != 0))
417 return ret;
418
419 tmp = vmw_resource_reference(&ctx->res);
420 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
421 &vmw_user_context_base_release, NULL);
422
423 if (unlikely(ret != 0)) {
424 vmw_resource_unreference(&tmp);
425 goto out_err;
426 }
427
428 arg->cid = res->id;
429out_err:
430 vmw_resource_unreference(&res);
431 return ret;
432
433}
434
435int vmw_context_check(struct vmw_private *dev_priv,
436 struct ttm_object_file *tfile,
437 int id,
438 struct vmw_resource **p_res)
439{
440 struct vmw_resource *res;
441 int ret = 0;
442
443 read_lock(&dev_priv->resource_lock);
444 res = idr_find(&dev_priv->context_idr, id);
445 if (res && res->avail) {
446 struct vmw_user_context *ctx =
447 container_of(res, struct vmw_user_context, res);
448 if (ctx->base.tfile != tfile && !ctx->base.shareable)
449 ret = -EPERM;
450 if (p_res)
451 *p_res = vmw_resource_reference(res);
452 } else
453 ret = -EINVAL;
454 read_unlock(&dev_priv->resource_lock);
455
456 return ret;
457}
458
459struct vmw_bpp {
460 uint8_t bpp;
461 uint8_t s_bpp;
462};
463
464/*
465 * Size table for the supported SVGA3D surface formats. It consists of
466 * two values. The bpp value and the s_bpp value which is short for
467 * "stride bits per pixel". The values are given in such a way that the
468 * minimum stride for the image is calculated using
469 *
470 * min_stride = w*s_bpp
471 *
472 * and the total memory requirement for the image is
473 *
474 * h*min_stride*bpp/s_bpp
475 *
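 * For example, SVGA3D_DXT1 below has {bpp = 4, s_bpp = 16}, so a 64x64
 * DXT1 mip level needs a stride of (64*16 + 7)/8 = 128 bytes and
 * 64*128*4/16 = 2048 bytes of memory in total.
 *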
476 */
477static const struct vmw_bpp vmw_sf_bpp[] = {
478 [SVGA3D_FORMAT_INVALID] = {0, 0},
479 [SVGA3D_X8R8G8B8] = {32, 32},
480 [SVGA3D_A8R8G8B8] = {32, 32},
481 [SVGA3D_R5G6B5] = {16, 16},
482 [SVGA3D_X1R5G5B5] = {16, 16},
483 [SVGA3D_A1R5G5B5] = {16, 16},
484 [SVGA3D_A4R4G4B4] = {16, 16},
485 [SVGA3D_Z_D32] = {32, 32},
486 [SVGA3D_Z_D16] = {16, 16},
487 [SVGA3D_Z_D24S8] = {32, 32},
488 [SVGA3D_Z_D15S1] = {16, 16},
489 [SVGA3D_LUMINANCE8] = {8, 8},
490 [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
491 [SVGA3D_LUMINANCE16] = {16, 16},
492 [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
493 [SVGA3D_DXT1] = {4, 16},
494 [SVGA3D_DXT2] = {8, 32},
495 [SVGA3D_DXT3] = {8, 32},
496 [SVGA3D_DXT4] = {8, 32},
497 [SVGA3D_DXT5] = {8, 32},
498 [SVGA3D_BUMPU8V8] = {16, 16},
499 [SVGA3D_BUMPL6V5U5] = {16, 16},
500 [SVGA3D_BUMPX8L8V8U8] = {32, 32},
501 [SVGA3D_ARGB_S10E5] = {16, 16},
502 [SVGA3D_ARGB_S23E8] = {32, 32},
503 [SVGA3D_A2R10G10B10] = {32, 32},
504 [SVGA3D_V8U8] = {16, 16},
505 [SVGA3D_Q8W8V8U8] = {32, 32},
506 [SVGA3D_CxV8U8] = {16, 16},
507 [SVGA3D_X8L8V8U8] = {32, 32},
508 [SVGA3D_A2W10V10U10] = {32, 32},
509 [SVGA3D_ALPHA8] = {8, 8},
510 [SVGA3D_R_S10E5] = {16, 16},
511 [SVGA3D_R_S23E8] = {32, 32},
512 [SVGA3D_RG_S10E5] = {16, 16},
513 [SVGA3D_RG_S23E8] = {32, 32},
514 [SVGA3D_BUFFER] = {8, 8},
515 [SVGA3D_Z_D24X8] = {32, 32},
516 [SVGA3D_V16U16] = {32, 32},
517 [SVGA3D_G16R16] = {32, 32},
518 [SVGA3D_A16B16G16R16] = {64, 64},
519 [SVGA3D_UYVY] = {12, 12},
520 [SVGA3D_YUY2] = {12, 12},
521 [SVGA3D_NV12] = {12, 8},
522 [SVGA3D_AYUV] = {32, 32},
523 [SVGA3D_BC4_UNORM] = {4, 16},
524 [SVGA3D_BC5_UNORM] = {8, 32},
525 [SVGA3D_Z_DF16] = {16, 16},
526 [SVGA3D_Z_DF24] = {24, 24},
527 [SVGA3D_Z_D24S8_INT] = {32, 32}
528};
529
530
531/**
532 * Surface management.
533 */
534
535struct vmw_surface_dma {
536 SVGA3dCmdHeader header;
537 SVGA3dCmdSurfaceDMA body;
538 SVGA3dCopyBox cb;
539 SVGA3dCmdSurfaceDMASuffix suffix;
540};
541
542struct vmw_surface_define {
543 SVGA3dCmdHeader header;
544 SVGA3dCmdDefineSurface body;
545};
546
547struct vmw_surface_destroy {
548 SVGA3dCmdHeader header;
549 SVGA3dCmdDestroySurface body;
550};
551
552
553/**
554 * vmw_surface_dma_size - Compute fifo size for a dma command.
555 *
556 * @srf: Pointer to a struct vmw_surface
557 *
558 * Computes the required size for a surface dma command for backup or
559 * restoration of the surface represented by @srf.
560 */
561static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
562{
563 return srf->num_sizes * sizeof(struct vmw_surface_dma);
564}
565
566
567/**
568 * vmw_surface_define_size - Compute fifo size for a surface define command.
569 *
570 * @srf: Pointer to a struct vmw_surface
571 *
572 * Computes the required size for a surface define command for the definition
573 * of the surface represented by @srf.
574 */
575static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
576{
577 return sizeof(struct vmw_surface_define) + srf->num_sizes *
578 sizeof(SVGA3dSize);
579}
580
581
582/**
583 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
584 *
585 * Computes the required size for a surface destroy command for the destruction
586 * of a hw surface.
587 */
588static inline uint32_t vmw_surface_destroy_size(void)
589{
590 return sizeof(struct vmw_surface_destroy);
591}
592
593/**
594 * vmw_surface_destroy_encode - Encode a surface_destroy command.
595 *
596 * @id: The surface id
597 * @cmd_space: Pointer to memory area in which the commands should be encoded.
598 */
599static void vmw_surface_destroy_encode(uint32_t id,
600 void *cmd_space)
601{
602 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
603 cmd_space;
604
605 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
606 cmd->header.size = sizeof(cmd->body);
607 cmd->body.sid = id;
608}
609
610/**
611 * vmw_surface_define_encode - Encode a surface_define command.
612 *
613 * @srf: Pointer to a struct vmw_surface object.
614 * @cmd_space: Pointer to memory area in which the commands should be encoded.
615 */
616static void vmw_surface_define_encode(const struct vmw_surface *srf,
617 void *cmd_space)
618{
619 struct vmw_surface_define *cmd = (struct vmw_surface_define *)
620 cmd_space;
621 struct drm_vmw_size *src_size;
622 SVGA3dSize *cmd_size;
623 uint32_t cmd_len;
624 int i;
625
626 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
627
628 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
629 cmd->header.size = cmd_len;
630 cmd->body.sid = srf->res.id;
631 cmd->body.surfaceFlags = srf->flags;
632 cmd->body.format = cpu_to_le32(srf->format);
633 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
634 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
635
636 cmd += 1;
637 cmd_size = (SVGA3dSize *) cmd;
638 src_size = srf->sizes;
639
640 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
641 cmd_size->width = src_size->width;
642 cmd_size->height = src_size->height;
643 cmd_size->depth = src_size->depth;
644 }
645}
646
647
648/**
649 * vmw_surface_dma_encode - Encode a surface_dma command.
650 *
651 * @srf: Pointer to a struct vmw_surface object.
652 * @cmd_space: Pointer to memory area in which the commands should be encoded.
653 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
654 * should be placed or read from.
655 * @to_surface: Boolean whether to DMA to the surface or from the surface.
656 */
657static void vmw_surface_dma_encode(struct vmw_surface *srf,
658 void *cmd_space,
659 const SVGAGuestPtr *ptr,
660 bool to_surface)
661{
662 uint32_t i;
663 uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
664 uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
665 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
666
667 for (i = 0; i < srf->num_sizes; ++i) {
668 SVGA3dCmdHeader *header = &cmd->header;
669 SVGA3dCmdSurfaceDMA *body = &cmd->body;
670 SVGA3dCopyBox *cb = &cmd->cb;
671 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
672 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
673 const struct drm_vmw_size *cur_size = &srf->sizes[i];
674
675 header->id = SVGA_3D_CMD_SURFACE_DMA;
676 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
677
678 body->guest.ptr = *ptr;
679 body->guest.ptr.offset += cur_offset->bo_offset;
680 body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
681 body->host.sid = srf->res.id;
682 body->host.face = cur_offset->face;
683 body->host.mipmap = cur_offset->mip;
684 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
685 SVGA3D_READ_HOST_VRAM);
686 cb->x = 0;
687 cb->y = 0;
688 cb->z = 0;
689 cb->srcx = 0;
690 cb->srcy = 0;
691 cb->srcz = 0;
692 cb->w = cur_size->width;
693 cb->h = cur_size->height;
694 cb->d = cur_size->depth;
695
696 suffix->suffixSize = sizeof(*suffix);
697 suffix->maximumOffset = body->guest.pitch*cur_size->height*
698 cur_size->depth*bpp / stride_bpp;
699 suffix->flags.discard = 0;
700 suffix->flags.unsynchronized = 0;
701 suffix->flags.reserved = 0;
702 ++cmd;
703 }
704}
705
706
707static void vmw_hw_surface_destroy(struct vmw_resource *res)
708{
709
710 struct vmw_private *dev_priv = res->dev_priv;
711 struct vmw_surface *srf;
712 void *cmd;
713
714 if (res->id != -1) {
715
716 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
717 if (unlikely(cmd == NULL)) {
718 DRM_ERROR("Failed reserving FIFO space for surface "
719 "destruction.\n");
720 return;
721 }
722
723 vmw_surface_destroy_encode(res->id, cmd);
724 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
725
726 /*
727 * TODO: use used_memory_size_atomic, or a separate lock,
728 * to avoid taking dev_priv::cmdbuf_mutex in
729 * the destroy path.
730 */
731
732 mutex_lock(&dev_priv->cmdbuf_mutex);
733 srf = container_of(res, struct vmw_surface, res);
734 dev_priv->used_memory_size -= srf->backup_size;
735 mutex_unlock(&dev_priv->cmdbuf_mutex);
736
737 }
738 vmw_3d_resource_dec(dev_priv, false);
739}
740
741void vmw_surface_res_free(struct vmw_resource *res)
742{
743 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
744
745 if (srf->backup)
746 ttm_bo_unref(&srf->backup);
747 kfree(srf->offsets);
748 kfree(srf->sizes);
749 kfree(srf->snooper.image);
750 kfree(srf);
751}
752
753
754/**
755 * vmw_surface_do_validate - make a surface available to the device.
756 *
757 * @dev_priv: Pointer to a device private struct.
758 * @srf: Pointer to a struct vmw_surface.
759 *
760 * If the surface doesn't have a hw id, allocate one, and optionally
761 * DMA the backed up surface contents to the device.
762 *
763 * Returns -EBUSY if there wasn't sufficient device resources to
764 * complete the validation. Retry after freeing up resources.
765 *
766 * May return other errors if the kernel is out of guest resources.
767 */
768int vmw_surface_do_validate(struct vmw_private *dev_priv,
769 struct vmw_surface *srf)
770{
771 struct vmw_resource *res = &srf->res;
772 struct list_head val_list;
773 struct ttm_validate_buffer val_buf;
774 uint32_t submit_size;
775 uint8_t *cmd;
776 int ret;
777
778 if (likely(res->id != -1))
779 return 0;
780
781 if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
782 dev_priv->memory_size))
783 return -EBUSY;
784
785 /*
786 * Reserve- and validate the backup DMA bo.
787 */
788
789 if (srf->backup) {
790 INIT_LIST_HEAD(&val_list);
791 val_buf.bo = ttm_bo_reference(srf->backup);
792 val_buf.new_sync_obj_arg = (void *)((unsigned long)
793 DRM_VMW_FENCE_FLAG_EXEC);
794 list_add_tail(&val_buf.head, &val_list);
795 ret = ttm_eu_reserve_buffers(&val_list);
796 if (unlikely(ret != 0))
797 goto out_no_reserve;
798
799 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
800 true, false, false);
801 if (unlikely(ret != 0))
802 goto out_no_validate;
803 }
804
805 /*
806 * Alloc id for the resource.
807 */
808
809 ret = vmw_resource_alloc_id(dev_priv, res);
810 if (unlikely(ret != 0)) {
811 DRM_ERROR("Failed to allocate a surface id.\n");
812 goto out_no_id;
813 }
814 if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
815 ret = -EBUSY;
816 goto out_no_fifo;
817 }
818
819
820 /*
821 * Encode surface define- and dma commands.
822 */
823
824 submit_size = vmw_surface_define_size(srf);
825 if (srf->backup)
826 submit_size += vmw_surface_dma_size(srf);
827
828 cmd = vmw_fifo_reserve(dev_priv, submit_size);
829 if (unlikely(cmd == NULL)) {
830 DRM_ERROR("Failed reserving FIFO space for surface "
831 "validation.\n");
832 ret = -ENOMEM;
833 goto out_no_fifo;
834 }
835
836 vmw_surface_define_encode(srf, cmd);
837 if (srf->backup) {
838 SVGAGuestPtr ptr;
839
840 cmd += vmw_surface_define_size(srf);
841 vmw_bo_get_guest_ptr(srf->backup, &ptr);
842 vmw_surface_dma_encode(srf, cmd, &ptr, true);
843 }
844
845 vmw_fifo_commit(dev_priv, submit_size);
846
847 /*
848 * Create a fence object and fence the backup buffer.
849 */
850
851 if (srf->backup) {
852 struct vmw_fence_obj *fence;
853
854 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
855 &fence, NULL);
856 ttm_eu_fence_buffer_objects(&val_list, fence);
857 if (likely(fence != NULL))
858 vmw_fence_obj_unreference(&fence);
859 ttm_bo_unref(&val_buf.bo);
860 ttm_bo_unref(&srf->backup);
861 }
862
863 /*
864 * Surface memory usage accounting.
865 */
866
867 dev_priv->used_memory_size += srf->backup_size;
868
869 return 0;
870
871out_no_fifo:
872 vmw_resource_release_id(res);
873out_no_id:
874out_no_validate:
875 if (srf->backup)
876 ttm_eu_backoff_reservation(&val_list);
877out_no_reserve:
878 if (srf->backup)
879 ttm_bo_unref(&val_buf.bo);
880 return ret;
881}
882
883/**
884 * vmw_surface_evict - Evict a hw surface.
885 *
886 * @dev_priv: Pointer to a device private struct.
887 * @srf: Pointer to a struct vmw_surface
888 *
889 * DMA the contents of a hw surface to a backup guest buffer object,
890 * and destroy the hw surface, releasing its id.
891 */
892int vmw_surface_evict(struct vmw_private *dev_priv,
893 struct vmw_surface *srf)
894{
895 struct vmw_resource *res = &srf->res;
896 struct list_head val_list;
897 struct ttm_validate_buffer val_buf;
898 uint32_t submit_size;
899 uint8_t *cmd;
900 int ret;
901 struct vmw_fence_obj *fence;
902 SVGAGuestPtr ptr;
903
904 BUG_ON(res->id == -1);
905
906 /*
907 * Create a surface backup buffer object.
908 */
909
910 if (!srf->backup) {
911 ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
912 ttm_bo_type_device,
913 &vmw_srf_placement, 0, 0, true,
914 NULL, &srf->backup);
915 if (unlikely(ret != 0))
916 return ret;
917 }
918
919 /*
920 * Reserve- and validate the backup DMA bo.
921 */
922
923 INIT_LIST_HEAD(&val_list);
924 val_buf.bo = ttm_bo_reference(srf->backup);
925 val_buf.new_sync_obj_arg = (void *)(unsigned long)
926 DRM_VMW_FENCE_FLAG_EXEC;
927 list_add_tail(&val_buf.head, &val_list);
928 ret = ttm_eu_reserve_buffers(&val_list);
929 if (unlikely(ret != 0))
930 goto out_no_reserve;
931
932 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
933 true, false, false);
934 if (unlikely(ret != 0))
935 goto out_no_validate;
936
937
938 /*
939 * Encode the dma- and surface destroy commands.
940 */
941
942 submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
943 cmd = vmw_fifo_reserve(dev_priv, submit_size);
944 if (unlikely(cmd == NULL)) {
945 DRM_ERROR("Failed reserving FIFO space for surface "
946 "eviction.\n");
947 ret = -ENOMEM;
948 goto out_no_fifo;
949 }
950
951 vmw_bo_get_guest_ptr(srf->backup, &ptr);
952 vmw_surface_dma_encode(srf, cmd, &ptr, false);
953 cmd += vmw_surface_dma_size(srf);
954 vmw_surface_destroy_encode(res->id, cmd);
955 vmw_fifo_commit(dev_priv, submit_size);
956
957 /*
958 * Surface memory usage accounting.
959 */
960
961 dev_priv->used_memory_size -= srf->backup_size;
962
963 /*
964 * Create a fence object and fence the DMA buffer.
965 */
966
967 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
968 &fence, NULL);
969 ttm_eu_fence_buffer_objects(&val_list, fence);
970 if (likely(fence != NULL))
971 vmw_fence_obj_unreference(&fence);
972 ttm_bo_unref(&val_buf.bo);
973
974 /*
975 * Release the surface ID.
976 */
977
978 vmw_resource_release_id(res);
979
980 return 0;
981
982out_no_fifo:
983out_no_validate:
984 if (srf->backup)
985 ttm_eu_backoff_reservation(&val_list);
986out_no_reserve:
987 ttm_bo_unref(&val_buf.bo);
988 ttm_bo_unref(&srf->backup);
989 return ret;
990}
991
992
993/**
994 * vmw_surface_validate - make a surface available to the device, evicting
995 * other surfaces if needed.
996 *
997 * @dev_priv: Pointer to a device private struct.
998 * @srf: Pointer to a struct vmw_surface.
999 *
1000 * Try to validate a surface and if it fails due to limited device resources,
1001 * repeatedly try to evict other surfaces until the request can be
1002 * acommodated.
1003 *
1004 * May return errors if out of resources.
1005 */
1006int vmw_surface_validate(struct vmw_private *dev_priv,
1007 struct vmw_surface *srf)
1008{
1009 int ret;
1010 struct vmw_surface *evict_srf;
1011
1012 do {
1013 write_lock(&dev_priv->resource_lock);
1014 list_del_init(&srf->lru_head);
1015 write_unlock(&dev_priv->resource_lock);
1016
1017 ret = vmw_surface_do_validate(dev_priv, srf);
1018 if (likely(ret != -EBUSY))
1019 break;
1020
1021 write_lock(&dev_priv->resource_lock);
1022 if (list_empty(&dev_priv->surface_lru)) {
1023 DRM_ERROR("Out of device memory for surfaces.\n");
1024 ret = -EBUSY;
1025 write_unlock(&dev_priv->resource_lock);
1026 break;
1027 }
1028
1029 evict_srf = vmw_surface_reference
1030 (list_first_entry(&dev_priv->surface_lru,
1031 struct vmw_surface,
1032 lru_head));
1033 list_del_init(&evict_srf->lru_head);
1034
1035 write_unlock(&dev_priv->resource_lock);
1036 (void) vmw_surface_evict(dev_priv, evict_srf);
1037
1038 vmw_surface_unreference(&evict_srf);
1039
1040 } while (1);
1041
1042 if (unlikely(ret != 0 && srf->res.id != -1)) {
1043 write_lock(&dev_priv->resource_lock);
1044 list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
1045 write_unlock(&dev_priv->resource_lock);
1046 }
1047
1048 return ret;
1049}
1050
1051
1052/**
1053 * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
1054 *
1055 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
1056 *
1057 * As part of the resource destruction, remove the surface from any
1058 * lookup lists.
1059 */
1060static void vmw_surface_remove_from_lists(struct vmw_resource *res)
1061{
1062 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1063
1064 list_del_init(&srf->lru_head);
1065}
1066
1067int vmw_surface_init(struct vmw_private *dev_priv,
1068 struct vmw_surface *srf,
1069 void (*res_free) (struct vmw_resource *res))
1070{
1071 int ret;
1072 struct vmw_resource *res = &srf->res;
1073
1074 BUG_ON(res_free == NULL);
1075 INIT_LIST_HEAD(&srf->lru_head);
1076 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
1077 VMW_RES_SURFACE, true, res_free,
1078 vmw_surface_remove_from_lists);
1079
1080 if (unlikely(ret != 0))
1081 res_free(res);
1082
1083 /*
1084 * The surface won't be visible to hardware until a
1085 * surface validate.
1086 */
1087
1088 (void) vmw_3d_resource_inc(dev_priv, false);
1089 vmw_resource_activate(res, vmw_hw_surface_destroy);
1090 return ret;
1091}
1092
1093static void vmw_user_surface_free(struct vmw_resource *res)
1094{
1095 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1096 struct vmw_user_surface *user_srf =
1097 container_of(srf, struct vmw_user_surface, srf);
1098
1099 if (srf->backup)
1100 ttm_bo_unref(&srf->backup);
1101 kfree(srf->offsets);
1102 kfree(srf->sizes);
1103 kfree(srf->snooper.image);
1104 kfree(user_srf);
1105}
1106
1107/**
1108 * vmw_resource_unreserve - unreserve resources previously reserved for
1109 * command submission.
1110 *
1111 * @list_head: list of resources to unreserve.
1112 *
1113 * Currently only surfaces are considered, and unreserving a surface
1114 * means putting it back on the device's surface lru list,
1115 * so that it can be evicted if necessary.
1116 * This function traverses the resource list and
1117 * checks whether resources are surfaces, and in that case puts them back
1118 * on the device's surface LRU list.
1119 */
1120void vmw_resource_unreserve(struct list_head *list)
1121{
1122 struct vmw_resource *res;
1123 struct vmw_surface *srf;
1124 rwlock_t *lock = NULL;
1125
1126 list_for_each_entry(res, list, validate_head) {
1127
1128 if (res->res_free != &vmw_surface_res_free &&
1129 res->res_free != &vmw_user_surface_free)
1130 continue;
1131
1132 if (unlikely(lock == NULL)) {
1133 lock = &res->dev_priv->resource_lock;
1134 write_lock(lock);
1135 }
1136
1137 srf = container_of(res, struct vmw_surface, res);
1138 list_del_init(&srf->lru_head);
1139 list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
1140 }
1141
1142 if (lock != NULL)
1143 write_unlock(lock);
1144}
1145
1146
1147int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1148 struct ttm_object_file *tfile,
1149 uint32_t handle, struct vmw_surface **out)
1150{
1151 struct vmw_resource *res;
1152 struct vmw_surface *srf;
1153 struct vmw_user_surface *user_srf;
1154 struct ttm_base_object *base;
1155 int ret = -EINVAL;
1156
1157 base = ttm_base_object_lookup(tfile, handle);
1158 if (unlikely(base == NULL))
1159 return -EINVAL;
1160
1161 if (unlikely(base->object_type != VMW_RES_SURFACE))
1162 goto out_bad_resource;
1163
1164 user_srf = container_of(base, struct vmw_user_surface, base);
1165 srf = &user_srf->srf;
1166 res = &srf->res;
1167
1168 read_lock(&dev_priv->resource_lock);
1169
1170 if (!res->avail || res->res_free != &vmw_user_surface_free) {
1171 read_unlock(&dev_priv->resource_lock);
1172 goto out_bad_resource;
1173 }
1174
1175 kref_get(&res->kref);
1176 read_unlock(&dev_priv->resource_lock);
1177
1178 *out = srf;
1179 ret = 0;
1180
1181out_bad_resource:
1182 ttm_base_object_unref(&base);
1183
1184 return ret;
1185}
1186
1187static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
1188{
1189 struct ttm_base_object *base = *p_base;
1190 struct vmw_user_surface *user_srf =
1191 container_of(base, struct vmw_user_surface, base);
1192 struct vmw_resource *res = &user_srf->srf.res;
1193
1194 *p_base = NULL;
1195 vmw_resource_unreference(&res);
1196}
1197
1198int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1199 struct drm_file *file_priv)
1200{
1201 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
1202 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1203
1204 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
1205}
1206
1207int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1208 struct drm_file *file_priv)
1209{
1210 struct vmw_private *dev_priv = vmw_priv(dev);
1211 struct vmw_user_surface *user_srf =
1212 kmalloc(sizeof(*user_srf), GFP_KERNEL);
1213 struct vmw_surface *srf;
1214 struct vmw_resource *res;
1215 struct vmw_resource *tmp;
1216 union drm_vmw_surface_create_arg *arg =
1217 (union drm_vmw_surface_create_arg *)data;
1218 struct drm_vmw_surface_create_req *req = &arg->req;
1219 struct drm_vmw_surface_arg *rep = &arg->rep;
1220 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1221 struct drm_vmw_size __user *user_sizes;
1222 int ret;
1223 int i, j;
1224 uint32_t cur_bo_offset;
1225 struct drm_vmw_size *cur_size;
1226 struct vmw_surface_offset *cur_offset;
1227 uint32_t stride_bpp;
1228 uint32_t bpp;
1229
1230 if (unlikely(user_srf == NULL))
1231 return -ENOMEM;
1232
1233 srf = &user_srf->srf;
1234 res = &srf->res;
1235
1236 srf->flags = req->flags;
1237 srf->format = req->format;
1238 srf->scanout = req->scanout;
1239 srf->backup = NULL;
1240
1241 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
1242 srf->num_sizes = 0;
1243 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
1244 srf->num_sizes += srf->mip_levels[i];
1245
1246 if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
1247 DRM_VMW_MAX_MIP_LEVELS) {
1248 ret = -EINVAL;
1249 goto out_err0;
1250 }
1251
1252 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
1253 if (unlikely(srf->sizes == NULL)) {
1254 ret = -ENOMEM;
1255 goto out_err0;
1256 }
1257 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
1258 GFP_KERNEL);
1259 if (unlikely(srf->offsets == NULL)) {
1260 ret = -ENOMEM;
1261 goto out_no_offsets;
1262 }
1263
1264 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1265 req->size_addr;
1266
1267 ret = copy_from_user(srf->sizes, user_sizes,
1268 srf->num_sizes * sizeof(*srf->sizes));
1269 if (unlikely(ret != 0)) {
1270 ret = -EFAULT;
1271 goto out_err1;
1272 }
1273
1274 cur_bo_offset = 0;
1275 cur_offset = srf->offsets;
1276 cur_size = srf->sizes;
1277
1278 bpp = vmw_sf_bpp[srf->format].bpp;
1279 stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
1280
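        /*
         * Walk all faces and mip levels to record each level's offset into
         * the backup buffer; the running total becomes srf->backup_size.
         */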
1281 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1282 for (j = 0; j < srf->mip_levels[i]; ++j) {
1283 uint32_t stride =
1284 (cur_size->width * stride_bpp + 7) >> 3;
1285
1286 cur_offset->face = i;
1287 cur_offset->mip = j;
1288 cur_offset->bo_offset = cur_bo_offset;
1289 cur_bo_offset += stride * cur_size->height *
1290 cur_size->depth * bpp / stride_bpp;
1291 ++cur_offset;
1292 ++cur_size;
1293 }
1294 }
1295 srf->backup_size = cur_bo_offset;
1296
1297 if (srf->scanout &&
1298 srf->num_sizes == 1 &&
1299 srf->sizes[0].width == 64 &&
1300 srf->sizes[0].height == 64 &&
1301 srf->format == SVGA3D_A8R8G8B8) {
1302
1303 /* allocate image area and clear it */
1304 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
1305 if (!srf->snooper.image) {
1306 DRM_ERROR("Failed to allocate cursor_image\n");
1307 ret = -ENOMEM;
1308 goto out_err1;
1309 }
1310 } else {
1311 srf->snooper.image = NULL;
1312 }
1313 srf->snooper.crtc = NULL;
1314
1315 user_srf->base.shareable = false;
1316 user_srf->base.tfile = NULL;
1317
1318 /**
1319 * From this point, the generic resource management functions
1320 * destroy the object on failure.
1321 */
1322
1323 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1324 if (unlikely(ret != 0))
1325 return ret;
1326
1327 tmp = vmw_resource_reference(&srf->res);
1328 ret = ttm_base_object_init(tfile, &user_srf->base,
1329 req->shareable, VMW_RES_SURFACE,
1330 &vmw_user_surface_base_release, NULL);
1331
1332 if (unlikely(ret != 0)) {
1333 vmw_resource_unreference(&tmp);
1334 vmw_resource_unreference(&res);
1335 return ret;
1336 }
1337
1338 rep->sid = user_srf->base.hash.key;
1339 if (rep->sid == SVGA3D_INVALID_ID)
1340 DRM_ERROR("Created bad Surface ID.\n");
1341
1342 vmw_resource_unreference(&res);
1343 return 0;
1344out_err1:
1345 kfree(srf->offsets);
1346out_no_offsets:
1347 kfree(srf->sizes);
1348out_err0:
1349 kfree(user_srf);
1350 return ret;
1351}
1352
1353int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1354 struct drm_file *file_priv)
1355{
1356 union drm_vmw_surface_reference_arg *arg =
1357 (union drm_vmw_surface_reference_arg *)data;
1358 struct drm_vmw_surface_arg *req = &arg->req;
1359 struct drm_vmw_surface_create_req *rep = &arg->rep;
1360 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1361 struct vmw_surface *srf;
1362 struct vmw_user_surface *user_srf;
1363 struct drm_vmw_size __user *user_sizes;
1364 struct ttm_base_object *base;
1365 int ret = -EINVAL;
1366
1367 base = ttm_base_object_lookup(tfile, req->sid);
1368 if (unlikely(base == NULL)) {
1369 DRM_ERROR("Could not find surface to reference.\n");
1370 return -EINVAL;
1371 }
1372
1373 if (unlikely(base->object_type != VMW_RES_SURFACE))
1374 goto out_bad_resource;
1375
1376 user_srf = container_of(base, struct vmw_user_surface, base);
1377 srf = &user_srf->srf;
1378
1379 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
1380 if (unlikely(ret != 0)) {
1381 DRM_ERROR("Could not add a reference to a surface.\n");
1382 goto out_no_reference;
1383 }
1384
1385 rep->flags = srf->flags;
1386 rep->format = srf->format;
1387 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
1388 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1389 rep->size_addr;
1390
1391 if (user_sizes)
1392 ret = copy_to_user(user_sizes, srf->sizes,
1393 srf->num_sizes * sizeof(*srf->sizes));
1394 if (unlikely(ret != 0)) {
1395 DRM_ERROR("copy_to_user failed %p %u\n",
1396 user_sizes, srf->num_sizes);
1397 ret = -EFAULT;
1398 }
1399out_bad_resource:
1400out_no_reference:
1401 ttm_base_object_unref(&base);
1402
1403 return ret;
1404}
1405
1406int vmw_surface_check(struct vmw_private *dev_priv,
1407 struct ttm_object_file *tfile,
1408 uint32_t handle, int *id)
1409{
1410 struct ttm_base_object *base;
1411 struct vmw_user_surface *user_srf;
1412
1413 int ret = -EPERM;
1414
1415 base = ttm_base_object_lookup(tfile, handle);
1416 if (unlikely(base == NULL))
1417 return -EINVAL;
1418
1419 if (unlikely(base->object_type != VMW_RES_SURFACE))
1420 goto out_bad_surface;
1421
1422 user_srf = container_of(base, struct vmw_user_surface, base);
1423 *id = user_srf->srf.res.id;
1424 ret = 0;
1425
1426out_bad_surface:
1427 /**
1428 * FIXME: May deadlock here when called from the
1429 * command parsing code.
1430 */
1431
1432 ttm_base_object_unref(&base);
1433 return ret;
1434}
1435
1436/**
1437 * Buffer management.
1438 */
1439
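/**
 * vmw_dmabuf_acc_size - estimate the TTM accounting size of a buffer object.
 *
 * @glob: Pointer to the bo global state.
 * @num_pages: Buffer size in pages.
 *
 * Returns the fixed per-object overhead plus the page-aligned size of the
 * page pointer array, for use with ttm_mem_global accounting.
 */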
1440static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
1441 unsigned long num_pages)
1442{
1443 static size_t bo_user_size = ~0;
1444
1445 size_t page_array_size =
1446 (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
1447
1448 if (unlikely(bo_user_size == ~0)) {
1449 bo_user_size = glob->ttm_bo_extra_size +
1450 ttm_round_pot(sizeof(struct vmw_dma_buffer));
1451 }
1452
1453 return bo_user_size + page_array_size;
1454}
1455
1456void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
1457{
1458 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1459 struct ttm_bo_global *glob = bo->glob;
1460
1461 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
1462 kfree(vmw_bo);
1463}
1464
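/**
 * vmw_dmabuf_init - initialize a vmw_dma_buffer and its embedded TTM bo.
 *
 * @dev_priv: Pointer to the device private structure.
 * @vmw_bo: The buffer to initialize; zeroed here before ttm_bo_init().
 * @size: Requested size in bytes.
 * @placement: Initial TTM placement.
 * @interruptible: Whether waits during initialization are interruptible.
 * @bo_free: Destructor for the buffer object; must not be NULL.
 *
 * On accounting failure the destructor is called on behalf of the caller,
 * mirroring what ttm_bo_init() does on error.
 */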
1465int vmw_dmabuf_init(struct vmw_private *dev_priv,
1466 struct vmw_dma_buffer *vmw_bo,
1467 size_t size, struct ttm_placement *placement,
1468 bool interruptible,
1469 void (*bo_free) (struct ttm_buffer_object *bo))
1470{
1471 struct ttm_bo_device *bdev = &dev_priv->bdev;
1472 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1473 size_t acc_size;
1474 int ret;
1475
1476 BUG_ON(!bo_free);
1477
1478 acc_size =
1479 vmw_dmabuf_acc_size(bdev->glob,
1480 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1481
1482 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1483 if (unlikely(ret != 0)) {
1484 /* we must free the bo here as
1485 * ttm_buffer_object_init does so as well */
1486 bo_free(&vmw_bo->base);
1487 return ret;
1488 }
1489
1490 memset(vmw_bo, 0, sizeof(*vmw_bo));
1491
1492 INIT_LIST_HEAD(&vmw_bo->validate_list);
1493
1494 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
1495 ttm_bo_type_device, placement,
1496 0, 0, interruptible,
1497 NULL, acc_size, bo_free);
1498 return ret;
1499}
1500
1501static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1502{
1503 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1504 struct ttm_bo_global *glob = bo->glob;
1505
1506 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
1507 kfree(vmw_user_bo);
1508}
1509
1510static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
1511{
1512 struct vmw_user_dma_buffer *vmw_user_bo;
1513 struct ttm_base_object *base = *p_base;
1514 struct ttm_buffer_object *bo;
1515
1516 *p_base = NULL;
1517
1518 if (unlikely(base == NULL))
1519 return;
1520
1521 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1522 bo = &vmw_user_bo->dma.base;
1523 ttm_bo_unref(&bo);
1524}
1525
1526int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1527 struct drm_file *file_priv)
1528{
1529 struct vmw_private *dev_priv = vmw_priv(dev);
1530 union drm_vmw_alloc_dmabuf_arg *arg =
1531 (union drm_vmw_alloc_dmabuf_arg *)data;
1532 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
1533 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
1534 struct vmw_user_dma_buffer *vmw_user_bo;
1535 struct ttm_buffer_object *tmp;
1536 struct vmw_master *vmaster = vmw_master(file_priv->master);
1537 int ret;
1538
1539 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1540 if (unlikely(vmw_user_bo == NULL))
1541 return -ENOMEM;
1542
1543 ret = ttm_read_lock(&vmaster->lock, true);
1544 if (unlikely(ret != 0)) {
1545 kfree(vmw_user_bo);
1546 return ret;
1547 }
1548
1549 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
1550 &vmw_vram_sys_placement, true,
1551 &vmw_user_dmabuf_destroy);
1552 if (unlikely(ret != 0))
1553 goto out_no_dmabuf;
1554
1555 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
1556 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
1557 &vmw_user_bo->base,
1558 false,
1559 ttm_buffer_type,
1560 &vmw_user_dmabuf_release, NULL);
1561 if (unlikely(ret != 0))
1562 goto out_no_base_object;
1563 else {
1564 rep->handle = vmw_user_bo->base.hash.key;
1565 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
1566 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
1567 rep->cur_gmr_offset = 0;
1568 }
1569
1570out_no_base_object:
1571 ttm_bo_unref(&tmp);
1572out_no_dmabuf:
1573 ttm_read_unlock(&vmaster->lock);
1574
1575 return ret;
1576}
1577
1578int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
1579 struct drm_file *file_priv)
1580{
1581 struct drm_vmw_unref_dmabuf_arg *arg =
1582 (struct drm_vmw_unref_dmabuf_arg *)data;
1583
1584 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1585 arg->handle,
1586 TTM_REF_USAGE);
1587}
1588
1589uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
1590 uint32_t cur_validate_node)
1591{
1592 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1593
1594 if (likely(vmw_bo->on_validate_list))
1595 return vmw_bo->cur_validate_node;
1596
1597 vmw_bo->cur_validate_node = cur_validate_node;
1598 vmw_bo->on_validate_list = true;
1599
1600 return cur_validate_node;
1601}
1602
1603void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
1604{
1605 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1606
1607 vmw_bo->on_validate_list = false;
1608}
1609
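/**
 * vmw_user_dmabuf_lookup - look up a dma buffer by user-space handle.
 *
 * @tfile: TTM object file of the caller.
 * @handle: The handle as seen by user space.
 * @out: On success, points to the vmw_dma_buffer, with a ttm bo reference
 * held for the caller.
 *
 * Returns -ESRCH if the handle is unknown, and -EINVAL if it refers to an
 * object that is not a buffer.
 */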
1610int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1611 uint32_t handle, struct vmw_dma_buffer **out)
1612{
1613 struct vmw_user_dma_buffer *vmw_user_bo;
1614 struct ttm_base_object *base;
1615
1616 base = ttm_base_object_lookup(tfile, handle);
1617 if (unlikely(base == NULL)) {
1618 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1619 (unsigned long)handle);
1620 return -ESRCH;
1621 }
1622
1623 if (unlikely(base->object_type != ttm_buffer_type)) {
1624 ttm_base_object_unref(&base);
1625 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1626 (unsigned long)handle);
1627 return -EINVAL;
1628 }
1629
1630 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1631 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
1632 ttm_base_object_unref(&base);
1633 *out = &vmw_user_bo->dma;
1634
1635 return 0;
1636}
1637
1638/*
1639 * Stream management
1640 */
1641
1642static void vmw_stream_destroy(struct vmw_resource *res)
1643{
1644 struct vmw_private *dev_priv = res->dev_priv;
1645 struct vmw_stream *stream;
1646 int ret;
1647
1648 DRM_INFO("%s: unref\n", __func__);
1649 stream = container_of(res, struct vmw_stream, res);
1650
1651 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
1652 WARN_ON(ret != 0);
1653}
1654
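/**
 * vmw_stream_init - initialize an overlay stream resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @stream: The stream to initialize.
 * @res_free: Destructor, or NULL to simply kfree() the stream.
 *
 * Registers the resource in the stream idr, claims an overlay stream id
 * and activates the resource with vmw_stream_destroy() as destructor of
 * the hardware side.
 */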
1655static int vmw_stream_init(struct vmw_private *dev_priv,
1656 struct vmw_stream *stream,
1657 void (*res_free) (struct vmw_resource *res))
1658{
1659 struct vmw_resource *res = &stream->res;
1660 int ret;
1661
1662 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1663 VMW_RES_STREAM, false, res_free, NULL);
1664
1665 if (unlikely(ret != 0)) {
1666 if (res_free == NULL)
1667 kfree(stream);
1668 else
1669 res_free(&stream->res);
1670 return ret;
1671 }
1672
1673 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
1674 if (ret) {
1675 vmw_resource_unreference(&res);
1676 return ret;
1677 }
1678
1679 DRM_INFO("%s: claimed\n", __func__);
1680
1681 vmw_resource_activate(&stream->res, vmw_stream_destroy);
1682 return 0;
1683}
1684
1685/**
1686 * User-space stream management:
1687 */
1688
1689static void vmw_user_stream_free(struct vmw_resource *res)
1690{
1691 struct vmw_user_stream *stream =
1692 container_of(res, struct vmw_user_stream, stream.res);
1693
1694 kfree(stream);
1695}
1696
1697/**
1698 * This function is called when user space has no more references on the
1699 * base object. It releases the base-object's reference on the resource object.
1700 */
1701
1702static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1703{
1704 struct ttm_base_object *base = *p_base;
1705 struct vmw_user_stream *stream =
1706 container_of(base, struct vmw_user_stream, base);
1707 struct vmw_resource *res = &stream->stream.res;
1708
1709 *p_base = NULL;
1710 vmw_resource_unreference(&res);
1711}
1712
1713int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1714 struct drm_file *file_priv)
1715{
1716 struct vmw_private *dev_priv = vmw_priv(dev);
1717 struct vmw_resource *res;
1718 struct vmw_user_stream *stream;
1719 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1720 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1721 int ret = 0;
1722
1723 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1724 if (unlikely(res == NULL))
1725 return -EINVAL;
1726
1727 if (res->res_free != &vmw_user_stream_free) {
1728 ret = -EINVAL;
1729 goto out;
1730 }
1731
1732 stream = container_of(res, struct vmw_user_stream, stream.res);
1733 if (stream->base.tfile != tfile) {
1734 ret = -EINVAL;
1735 goto out;
1736 }
1737
1738 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1739out:
1740 vmw_resource_unreference(&res);
1741 return ret;
1742}
1743
1744int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1745 struct drm_file *file_priv)
1746{
1747 struct vmw_private *dev_priv = vmw_priv(dev);
1748 struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1749 struct vmw_resource *res;
1750 struct vmw_resource *tmp;
1751 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1752 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1753 int ret;
1754
1755 if (unlikely(stream == NULL))
1756 return -ENOMEM;
1757
1758 res = &stream->stream.res;
1759 stream->base.shareable = false;
1760 stream->base.tfile = NULL;
1761
1762 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1763 if (unlikely(ret != 0))
1764 return ret;
1765
1766 tmp = vmw_resource_reference(res);
1767 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1768 &vmw_user_stream_base_release, NULL);
1769
1770 if (unlikely(ret != 0)) {
1771 vmw_resource_unreference(&tmp);
1772 goto out_err;
1773 }
1774
1775 arg->stream_id = res->id;
1776out_err:
1777 vmw_resource_unreference(&res);
1778 return ret;
1779}
1780
1781int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1782 struct ttm_object_file *tfile,
1783 uint32_t *inout_id, struct vmw_resource **out)
1784{
1785 struct vmw_user_stream *stream;
1786 struct vmw_resource *res;
1787 int ret;
1788
1789 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1790 if (unlikely(res == NULL))
1791 return -EINVAL;
1792
1793 if (res->res_free != &vmw_user_stream_free) {
1794 ret = -EINVAL;
1795 goto err_ref;
1796 }
1797
1798 stream = container_of(res, struct vmw_user_stream, stream.res);
1799 if (stream->base.tfile != tfile) {
1800 ret = -EPERM;
1801 goto err_ref;
1802 }
1803
1804 *inout_id = stream->stream.stream_id;
1805 *out = res;
1806 return 0;
1807err_ref:
1808 vmw_resource_unreference(&res);
1809 return ret;
1810}