/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

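/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object to export
 *
 * Builds a scatter/gather table from the backing pages of the buffer
 * object, so a PRIME importer can map the memory.
 */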
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

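/**
 * amdgpu_gem_prime_vmap - map the exported buffer into kernel address space
 * @obj: GEM buffer object
 *
 * Creates an in-kernel virtual mapping of the buffer's backing memory via
 * ttm_bo_kmap(). Returns the virtual address on success or an ERR_PTR.
 */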
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

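/**
 * amdgpu_gem_prime_vunmap - tear down the kernel mapping created by vmap
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused; the mapping is tracked in the BO itself)
 */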
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

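/**
 * amdgpu_gem_prime_import_sg_table - import a foreign scatter/gather table
 * @dev: DRM device
 * @attach: dma-buf attachment for the buffer being imported
 * @sg: scatter/gather table of the imported pages
 *
 * Creates a GTT-domain buffer object backed by the imported pages, reusing
 * the exporter's reservation object; that reservation is held locked while
 * the BO is created against it. The new BO starts with prime_shared_count
 * set to 1.
 */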
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	bo->prime_shared_count = 1;
	return &bo->gem_base;
}

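/**
 * amdgpu_gem_prime_pin - pin the shared buffer into GTT
 * @obj: GEM buffer object
 *
 * Waits for all shared fences on the buffer before pinning it into the GTT
 * domain for access by the importer, and bumps prime_shared_count on success.
 */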
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	long ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Wait for all shared fences to complete before we switch to future
	 * use of exclusive fence on this prime shared bo.
	 */
	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						  MAX_SCHEDULE_TIMEOUT);
	if (unlikely(ret < 0)) {
		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
		amdgpu_bo_unreserve(bo);
		return ret;
	}

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		bo->prime_shared_count++;

	amdgpu_bo_unreserve(bo);
	return ret;
}

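/**
 * amdgpu_gem_prime_unpin - drop the PRIME pin on a shared buffer
 * @obj: GEM buffer object
 *
 * Unpins the buffer and decrements prime_shared_count.
 */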
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		return;

	amdgpu_bo_unpin(bo);
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);
}

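/**
 * amdgpu_gem_prime_res_obj - return the reservation object backing the buffer
 * @obj: GEM buffer object
 */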
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}

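/**
 * amdgpu_gem_prime_export - export the buffer object as a dma-buf
 * @dev: DRM device
 * @gobj: GEM buffer object to export
 * @flags: dma-buf flags
 *
 * Refuses to export userptr-backed buffers (-EPERM), otherwise defers to
 * the generic drm_gem_prime_export() helper.
 */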
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(dev, gobj, flags);
}