/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

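/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * A scatter/gather table for the pages of the buffer object's memory.
 */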
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

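/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM buffer object
 *
 * Sets up an in-kernel virtual mapping of the buffer object's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */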
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

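/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping created by
 * amdgpu_gem_prime_vmap().
 */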
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

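/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: virtual memory area
 *
 * Sets up a userspace mapping of the buffer object's memory in the given
 * virtual memory area. Userptr objects and objects created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS are rejected with -EPERM.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */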
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}

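/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: scatter/gather table
 *
 * Imports shared DMA buffer memory exported by another device into a new
 * GEM buffer object placed in the GTT domain, sharing the exporter's
 * reservation object.
 *
 * Returns:
 * The newly created GEM buffer object or an error pointer.
 */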
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	bo->prime_shared_count = 1;
	return &bo->gem_base;
}

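/**
 * amdgpu_gem_prime_pin - &drm_driver.gem_prime_pin implementation
 * @obj: GEM buffer object
 *
 * Pins the buffer object into GTT so it can be shared with other devices.
 * All shared fences are awaited first, since importers rely on the
 * exclusive fence from this point on.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */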
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	long ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Wait for all shared fences to complete before we switch to future
	 * use of exclusive fence on this prime shared bo.
	 */
	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						  MAX_SCHEDULE_TIMEOUT);
	if (unlikely(ret < 0)) {
		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
		amdgpu_bo_unreserve(bo);
		return ret;
	}

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		bo->prime_shared_count++;

	amdgpu_bo_unreserve(bo);
	return ret;
}

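/**
 * amdgpu_gem_prime_unpin - &drm_driver.gem_prime_unpin implementation
 * @obj: GEM buffer object
 *
 * Unpins the buffer object and decrements its prime_shared_count.
 */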
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		return;

	amdgpu_bo_unpin(bo);
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);
}

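/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * The buffer object's reservation object.
 */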
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}

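/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of the upcoming CPU access
 *
 * Before a read by the CPU, moves the unpinned buffer object to the GTT
 * domain when GTT is among the allowed framebuffer domains, so that CPU
 * reads do not have to go through VRAM. Writes and other buffers are left
 * untouched.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */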
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_framebuffer_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

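/*
 * DMA-buf operations for buffers exported by amdgpu: the generic GEM
 * helpers, plus a driver-specific &dma_buf_ops.begin_cpu_access hook.
 */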
static const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

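/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * Wraps the GEM buffer object in a DMA buffer for export. Userptr objects
 * and per-VM BOs (AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) cannot be exported.
 *
 * Returns:
 * The new DMA buffer or an error pointer.
 */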
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}

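/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: shared DMA buffer
 *
 * Short-circuits the import when the buffer was exported by this driver
 * and device; otherwise falls back to the generic GEM PRIME import path.
 *
 * Returns:
 * A GEM buffer object representing the shared DMA buffer or an error
 * pointer.
 */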
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}