/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;

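/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * A scatter/gather table for the pages backing the buffer object's memory.
 */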
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

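/**
 * amdgpu_gem_prime_vmap - &drm_driver.gem_prime_vmap implementation
 * @obj: GEM buffer object
 *
 * Sets up an in-kernel virtual mapping of the buffer object's memory via
 * ttm_bo_kmap().
 *
 * Returns:
 * The virtual address of the mapping on success, an ERR_PTR otherwise.
 */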
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

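/**
 * amdgpu_gem_prime_vunmap - &drm_driver.gem_prime_vunmap implementation
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused, the mapping is found through @obj)
 *
 * Tears down the in-kernel virtual mapping set up by amdgpu_gem_prime_vmap().
 */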
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

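/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM buffer object
 * @vma: virtual memory area to map the buffer object into
 *
 * Checks the VMA size against the buffer object, refuses userptr BOs and BOs
 * created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS, then delegates the actual
 * mapping to ttm_bo_mmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */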
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow it here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}

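/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: scatter/gather table of the imported pages
 *
 * Wraps an imported DMA-buf in a ttm_bo_type_sg buffer object that shares the
 * exporter's reservation object. The BO is confined to the GTT domain because
 * the backing pages are owned by the exporter, not by this driver.
 *
 * Returns:
 * The new GEM object or an ERR_PTR on failure.
 */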
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = attach->dmabuf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}

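/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: shared DMA buffer
 * @target_dev: target device
 * @attach: DMA-buf attachment
 *
 * For attachments from a foreign device, waits for all shared fences first so
 * that the importer can rely on the exclusive fence alone, then pins the
 * buffer into GTT so its pages cannot move while they are shared.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */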
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct device *target_dev,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, target_dev, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * Wait for all shared fences to complete before we switch to
		 * future use of the exclusive fence on this prime-shared bo.
		 */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							MAX_SCHEDULE_TIMEOUT);
		if (unlikely(r < 0)) {
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			goto error_unreserve;
		}
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}

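/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Unpins the buffer pinned by amdgpu_gem_map_attach() and, for foreign
 * devices, drops the prime_shared_count reference taken at attach time.
 */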
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}

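/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM buffer object
 *
 * Returns:
 * The buffer object's reservation object, so that exporter and importers
 * synchronize against the same set of fences.
 */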
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}

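/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared DMA buffer
 * @direction: direction of the pending CPU access
 *
 * For read accesses, moves an unpinned BO to the GTT domain (when GTT is among
 * the display-supported domains) so the CPU sees the buffer's current content;
 * write-only accesses return immediately without migrating the buffer.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */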
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to GTT */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

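/*
 * amdgpu overrides attach, detach and begin_cpu_access; the remaining ops are
 * the generic GEM DMA-buf helpers. amdgpu_gem_prime_export() installs this
 * table over the default one set up by drm_gem_prime_export().
 */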
static const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_gem_map_attach,
	.detach = amdgpu_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

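/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags such as DRM_CLOEXEC and DRM_RDWR
 *
 * Refuses to export userptr BOs and per-VM BOs, then wraps the buffer in a
 * DMA-buf via the generic helper and substitutes amdgpu_dmabuf_ops so that the
 * driver-specific attach/detach/begin_cpu_access hooks above are used.
 *
 * Returns:
 * The new DMA-buf or an ERR_PTR on failure.
 */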
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}

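/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: shared DMA buffer
 *
 * Short-circuits the self-import case: a DMA-buf exported by this device is
 * resolved back to its GEM object, taking an additional GEM reference instead
 * of going through the generic import path.
 *
 * Returns:
 * The GEM object or an ERR_PTR on failure.
 */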
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}