/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

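/**
 * vmw_resource_reference - take a reference on a resource
 *
 * @res: The resource to reference.
 *
 * Returns @res with its refcount incremented.
 */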
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}


/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

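/**
 * vmw_resource_release - final release of a resource
 *
 * @kref: Pointer to the kref member of the resource being released.
 *
 * Called via kref_put() with the device's resource_lock held for write.
 * The lock is dropped while the backup buffer is unbound and the hardware
 * destroy callback runs, and re-taken before the resource id is removed
 * from the idr.
 */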
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

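/**
 * vmw_resource_unreference - drop a reference on a resource
 *
 * @p_res: Pointer to the caller's resource pointer; set to NULL on return.
 *
 * Takes the device's resource_lock for write around the kref_put(), as
 * expected by vmw_resource_release().
 */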
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

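/**
 * vmw_resource_lookup - look up an activated resource by id
 *
 * @dev_priv: Pointer to a device private struct.
 * @idr: The idr to look up the id in.
 * @id: The resource id.
 *
 * Returns a referenced pointer to the resource if it exists and has
 * been activated, otherwise NULL.
 */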
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

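/**
 * vmw_dmabuf_init - initialize a struct vmw_dma_buffer
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @vmw_bo: The struct vmw_dma_buffer to initialize.
 * @size: Buffer size in bytes.
 * @placement: Initial TTM placement of the buffer.
 * @interruptible: Whether waits should be performed while interruptible.
 * @bo_free: Buffer-object destructor; also used to tell user buffers
 * (vmw_user_dmabuf_destroy) from kernel ones (vmw_dmabuf_bo_free).
 */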
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  (user) ? ttm_bo_type_device :
			  ttm_bo_type_kernel, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}

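/**
 * vmw_user_dmabuf_release - TTM base-object release callback for
 * user dma buffers.
 *
 * @p_base: Pointer to the caller's base-object pointer; set to NULL on
 * return. Drops the buffer-object reference held by the base object.
 */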
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

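/**
 * vmw_user_dmabuf_ref_obj_release - TTM ref-object release callback for
 * user dma buffers.
 *
 * @base: The base object of the buffer.
 * @ref_type: The type of the reference being released.
 *
 * Only synccpu write references are expected here; releasing one ends
 * the corresponding CPU write grab on the buffer.
 */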
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;
	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);
	return (vmw_user_bo->prime.base.tfile == tfile ||
		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		struct ttm_bo_device *bdev = bo->bdev;

		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, true,
				  !!(flags & drm_vmw_synccpu_dontblock));
		spin_unlock(&bdev->fence_lock);
		return ret;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

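/**
 * vmw_dmabuf_alloc_ioctl - ioctl function implementing the dma buffer
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * Allocates a user dma buffer and returns its handle, map offset and
 * GMR id to user space.
 */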
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

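/**
 * vmw_dmabuf_unref_ioctl - ioctl function implementing the dma buffer
 * unreference functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 */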
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

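/**
 * vmw_user_dmabuf_lookup - look up a dma buffer from a TTM user-space handle
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @handle: The TTM user-space handle.
 * @out: On success points to the referenced struct vmw_dma_buffer.
 *
 * Returns -ESRCH if the handle doesn't exist, and -EINVAL if it refers
 * to an object that isn't a buffer.
 */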
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

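/**
 * vmw_user_dmabuf_reference - add a usage reference to a user dma buffer
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @dma_buf: The dma buffer to reference.
 * @handle: On success set to the buffer's user-space handle.
 *
 * Returns -EINVAL if @dma_buf is not a user dma buffer.
 */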
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

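/**
 * vmw_stream_destroy - resource destructor for streams
 *
 * @res: The stream resource being destroyed.
 *
 * Releases the overlay stream id claimed in vmw_stream_init().
 */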
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

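/**
 * vmw_stream_init - initialize a stream resource and claim a stream id
 *
 * @dev_priv: Pointer to a device private struct.
 * @stream: The stream to initialize.
 * @res_free: Resource destructor, or NULL if the stream should simply
 * be kfreed on error.
 */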
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

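/**
 * vmw_stream_unref_ioctl - ioctl function implementing the stream
 * unreference functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * Verifies that the stream exists and is owned by the caller before
 * dropping the caller's usage reference on it.
 */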
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;


	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

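/**
 * vmw_stream_claim_ioctl - ioctl function implementing the stream
 * claim functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * Accounts for, allocates and initializes a user stream, returning its
 * resource id to user space.
 */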
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}


	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

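/**
 * vmw_user_stream_lookup - look up a user stream and return its stream id
 *
 * @dev_priv: Pointer to a device private struct.
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @inout_id: The user-space resource id on input; the device stream id
 * on successful return.
 * @out: On success points to the referenced stream resource.
 */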
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource, and in that case allocate one, then reserve and
 * validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 * @no_backup: Don't allocate a backup buffer for the resource.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_bo_device *bdev = bo->bdev;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		spin_lock(&bdev->fence_lock);
		(void) ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}