/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;

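/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object
 *
 * Builds a scatter/gather table from the pages backing @obj so that an
 * importing device can map the buffer.
 *
 * Returns:
 * The new sg_table, or an error pointer on failure.
 */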
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int npages = bo->tbo.num_pages;

        return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

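/**
 * amdgpu_gem_prime_vmap - map the whole buffer into the kernel address space
 * @obj: GEM buffer object
 *
 * Sets up a kernel virtual mapping of all of the buffer's pages via
 * ttm_bo_kmap() and caches it in bo->dma_buf_vmap for the matching vunmap.
 *
 * Returns:
 * The kernel virtual address, or an error pointer on failure.
 */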
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int ret;

        ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
                          &bo->dma_buf_vmap);
        if (ret)
                return ERR_PTR(ret);

        return bo->dma_buf_vmap.virtual;
}

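/**
 * amdgpu_gem_prime_vunmap - tear down the mapping created by vmap
 * @obj: GEM buffer object
 * @vaddr: virtual address (unused; the cached mapping is looked up via @obj)
 */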
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        ttm_bo_kunmap(&bo->dma_buf_vmap);
}

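/**
 * amdgpu_gem_prime_mmap - map an exported buffer into a userspace process
 * @obj: GEM buffer object
 * @vma: virtual memory area of the caller
 *
 * Rejects userptr and NO_CPU_ACCESS buffers, validates the requested size,
 * then offsets the VMA into the object's TTM mmap range and hands it to
 * ttm_bo_mmap(). Possession of the dma-buf fd already implies access, so
 * the usual access check is bypassed via drm_vma_node_allow()/revoke().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */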
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        unsigned asize = amdgpu_bo_size(bo);
        int ret;

        if (!vma->vm_file)
                return -ENODEV;

        if (adev == NULL)
                return -ENODEV;

        /* Check for valid size. */
        if (asize < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                return -EPERM;
        }

        vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

        /* prime mmap does not need to check access, so allow here */
        ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
        if (ret)
                return ret;

        ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
        drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

        return ret;
}

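/**
 * amdgpu_gem_prime_import_sg_table - import an sg_table as a GEM object
 * @dev: DRM device
 * @attach: dma-buf attachment the table was mapped from
 * @sg: scatter/gather table of the imported pages
 *
 * Creates an sg-type buffer object in the CPU domain that shares the
 * exporter's reservation object, and limits its domains to GTT, since
 * imported pages can never be placed in VRAM. Buffers coming from a
 * foreign exporter start with prime_shared_count = 1.
 *
 * Returns:
 * The new GEM object, or an error pointer on failure.
 */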
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
                                 struct dma_buf_attachment *attach,
                                 struct sg_table *sg)
{
        struct reservation_object *resv = attach->dmabuf->resv;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        int ret;

        memset(&bp, 0, sizeof(bp));
        bp.size = attach->dmabuf->size;
        bp.byte_align = PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_CPU;
        bp.flags = 0;
        bp.type = ttm_bo_type_sg;
        bp.resv = resv;

        ww_mutex_lock(&resv->lock, NULL);
        ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret)
                goto error;

        bo->tbo.sg = sg;
        bo->tbo.ttm->sg = sg;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
                bo->prime_shared_count = 1;

        ww_mutex_unlock(&resv->lock);
        return &bo->gem_base;

error:
        ww_mutex_unlock(&resv->lock);
        return ERR_PTR(ret);
}

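/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: shared buffer
 * @target_dev: device the buffer is being attached for
 * @attach: the new attachment
 *
 * Pins the buffer into GTT so the attaching device can reach it. For
 * attachments from a foreign driver, all shared fences are flushed first
 * and prime_shared_count is bumped, since such importers only observe the
 * exclusive fence.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */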
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
                                 struct device *target_dev,
                                 struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        long r;

        r = drm_gem_map_attach(dma_buf, target_dev, attach);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (unlikely(r != 0))
                goto error_detach;

        if (attach->dev->driver != adev->dev->driver) {
                /*
                 * Wait for all shared fences to complete before we switch
                 * to future use of the exclusive fence on this prime-shared
                 * bo.
                 */
                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
                                                        true, false,
                                                        MAX_SCHEDULE_TIMEOUT);
                if (unlikely(r < 0)) {
                        DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
                        goto error_unreserve;
                }
        }

        /* pin buffer into GTT */
        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
        if (r)
                goto error_unreserve;

        if (attach->dev->driver != adev->dev->driver)
                bo->prime_shared_count++;

error_unreserve:
        amdgpu_bo_unreserve(bo);

error_detach:
        if (r)
                drm_gem_map_detach(dma_buf, attach);
        return r;
}

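/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: shared buffer
 * @attach: the attachment being torn down
 *
 * Unpins the buffer and, for foreign-driver attachments, drops the
 * prime_shared_count reference taken in amdgpu_gem_map_attach().
 */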
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int ret;

        ret = amdgpu_bo_reserve(bo, true);
        if (unlikely(ret != 0))
                goto error;

        amdgpu_bo_unpin(bo);
        if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
                bo->prime_shared_count--;
        amdgpu_bo_unreserve(bo);

error:
        drm_gem_map_detach(dma_buf, attach);
}

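/**
 * amdgpu_gem_prime_res_obj - return the buffer's reservation object
 * @obj: GEM buffer object
 *
 * Exposes the TTM reservation object backing @obj so that fences for the
 * shared buffer can be attached and waited on.
 */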
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

        return bo->tbo.resv;
}

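/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: shared buffer
 * @direction: direction of the pending CPU access
 *
 * Before a CPU read, migrates an unpinned buffer to the GTT domain (when
 * GTT is among the device's supported display domains) so the CPU does not
 * have to read from VRAM. Writes and pinned buffers are left in place.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */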
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
                                       enum dma_data_direction direction)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { true, false };
        u32 domain = amdgpu_display_supported_domains(adev);
        int ret;
        bool reads = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_FROM_DEVICE);

        if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
                return 0;

        /* move to gtt */
        ret = amdgpu_bo_reserve(bo, false);
        if (unlikely(ret != 0))
                return ret;

        if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        }

        amdgpu_bo_unreserve(bo);
        return ret;
}

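/*
 * dma_buf_ops for buffers exported by amdgpu: amdgpu-specific attach,
 * detach and begin_cpu_access hooks combined with the generic DRM GEM
 * helpers for everything else.
 */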
static const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .attach = amdgpu_gem_map_attach,
        .detach = amdgpu_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = amdgpu_gem_begin_cpu_access,
        .map = drm_gem_dmabuf_kmap,
        .map_atomic = drm_gem_dmabuf_kmap_atomic,
        .unmap = drm_gem_dmabuf_kunmap,
        .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

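/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM buffer object
 * @flags: flags, such as O_CLOEXEC, for the dma-buf file
 *
 * Refuses to export userptr buffers and per-VM buffers
 * (AMDGPU_GEM_CREATE_VM_ALWAYS_VALID), then wraps the generic export,
 * pointing the dma-buf file's f_mapping at the device's anonymous inode
 * and installing amdgpu_dmabuf_ops on the new dma-buf.
 *
 * Returns:
 * The new dma-buf, or an error pointer on failure.
 */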
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags)
{
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct dma_buf *buf;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                return ERR_PTR(-EPERM);

        buf = drm_gem_prime_export(dev, gobj, flags);
        if (!IS_ERR(buf)) {
                buf->file->f_mapping = dev->anon_inode->i_mapping;
                buf->ops = &amdgpu_dmabuf_ops;
        }

        return buf;
}

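/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: buffer to import
 *
 * Short-circuits self-imports: a dma-buf that this device itself exported
 * is resolved back to its underlying GEM object with an extra reference
 * instead of going through the generic dma-buf attach path.
 *
 * Returns:
 * The GEM object, or an error pointer on failure.
 */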
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
                                               struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj;

        if (dma_buf->ops == &amdgpu_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dma-buf exported from our own GEM
                         * increases the refcount on the GEM object itself
                         * instead of the f_count of the dma-buf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        return drm_gem_prime_import(dev, dma_buf);
}