/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

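/*
 * Upper bound on how many eviction errors the retry loops in
 * vmw_resource_validate() and vmw_resource_evict_type() below tolerate
 * before giving up.
 */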
#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

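/*
 * Resource function table for streams. A stream is a pure device-side
 * object identified by a stream id: it carries no backup buffer, is
 * never evicted, and therefore needs no create/destroy/bind/unbind
 * hooks.
 */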
static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}
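
/*
 * Note on locking: vmw_resource_release() is entered with
 * dev_priv->resource_lock write-held (it is invoked via kref_put() in
 * vmw_resource_unreference() below). It drops the lock while unbinding
 * and freeing, then re-takes it before removing the id, which is why
 * the caller can unlock unconditionally afterwards.
 */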

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager,
 * and set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

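/*
 * The idr_preload() / idr_alloc(..., GFP_NOWAIT) / idr_preload_end()
 * sequence above preallocates idr nodes outside the rwlock, so the
 * allocation done under resource_lock can never sleep.
 */
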
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
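
/*
 * Note: struct_size/user_struct_size above are computed once and cached
 * in function-local statics, and in vmw_dma_alloc_coherent mode each
 * page additionally costs one dma_addr_t slot in the page array.
 */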

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  (user) ? ttm_bo_type_device :
			  ttm_bo_type_kernel, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}
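
/*
 * Note: vmw_dmabuf_init() infers "user buffer" from the destroy
 * callback. User-visible buffers are created as ttm_bo_type_device,
 * which gives them a mappable offset in the device address space,
 * while kernel-internal buffers stay ttm_bo_type_kernel.
 */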

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
444
Thomas Hellstrom1d7a5cb2012-11-21 12:32:19 +0100445static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
446 enum ttm_ref_type ref_type)
447{
448 struct vmw_user_dma_buffer *user_bo;
449 user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
450
451 switch (ref_type) {
452 case TTM_REF_SYNCCPU_WRITE:
453 ttm_bo_synccpu_write_release(&user_bo->dma.base);
454 break;
455 default:
456 BUG();
457 }
458}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);
	return (vmw_user_bo->prime.base.tfile == tfile ||
		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		struct ttm_bo_device *bdev = bo->bdev;

		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, true,
				  !!(flags & drm_vmw_synccpu_dontblock));
		spin_unlock(&bdev->fence_lock);
		return ret;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}
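
/*
 * The TTM_REF_SYNCCPU_WRITE reference added above is what makes a
 * blocking grab self-cleaning: when @tfile is closed, dropping that
 * reference invokes vmw_user_dmabuf_ref_obj_release(), which ends the
 * cpu write grab on the buffer.
 */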

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}
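
/*
 * Note: on success the reply aliases the GMR id to the buffer handle
 * with a zero offset (rep->cur_gmr_id/cur_gmr_offset above), and
 * rep->map_handle is the offset user space hands to mmap() to map the
 * buffer.
 */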

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */
	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
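
/*
 * Note: the pitch computed above is width * bytes-per-pixel with no
 * extra row alignment, and the dumb buffer is sized as a plain
 * pitch * height allocation.
 */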

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
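
/*
 * Note: unreserve puts an evictable resource at the LRU tail, while
 * eviction takes victims from the list head, so the most recently used
 * resources are the last to be evicted.
 */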

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptibly.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
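
/*
 * Note: a failed eviction puts the victim back on the LRU list; the
 * loop above only gives up on -ERESTARTSYS or after more than
 * VMW_RES_EVICT_ERR_COUNT failures, so transient eviction errors are
 * simply retried.
 */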

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_bo_device *bdev = bo->bdev;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		spin_lock(&bdev->fence_lock);
		(void) ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
	}
}
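
/*
 * Note: when a backup buffer leaves VMW_PL_MOB placement, every
 * resource bound to it is unbound and the buffer is idled under the
 * fence lock before the move proceeds, so the device no longer
 * references the buffer at its new placement.
 */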

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}