/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include "drmP.h"
#include "drm.h"

#include "radeon.h"
#include "radeon_drm.h"

#include <linux/dma-buf.h>

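/*
 * Map the exporter's pages for an attached device: build an sg table
 * from the buffer's TTM page array (under struct_mutex) and DMA-map
 * it for the importer.
 */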
static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					       enum dma_data_direction dir)
{
	struct radeon_bo *bo = attachment->dmabuf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int npages = bo->tbo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

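/*
 * Undo radeon_gem_map_dma_buf(): unmap the sg table from the
 * importer's device, then free the table itself.
 */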
static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				     struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

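/*
 * Called when the last reference to the dma-buf is dropped: clear the
 * cached export pointer and drop the GEM reference held for the export.
 */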
static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;

	if (bo->gem_base.export_dma_buf == dma_buf) {
		DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
		bo->gem_base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

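/*
 * CPU access through kmap/kmap_atomic is not supported; the stubs
 * below only exist to satisfy the dma_buf_ops interface.
 */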
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

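/* Userspace mmap of the dma-buf is likewise not supported. */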
static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

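/* dma-buf callbacks wired up for radeon's PRIME exports. */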
static const struct dma_buf_ops radeon_dmabuf_ops = {
	.map_dma_buf = radeon_gem_map_dma_buf,
	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
	.release = radeon_gem_dmabuf_release,
	.kmap = radeon_gem_kmap,
	.kmap_atomic = radeon_gem_kmap_atomic,
	.kunmap = radeon_gem_kunmap,
	.kunmap_atomic = radeon_gem_kunmap_atomic,
	.mmap = radeon_gem_prime_mmap,
};

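/*
 * Wrap an imported sg table in a new GTT-domain radeon_bo and track it
 * on the device's GEM object list.
 */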
static int radeon_prime_create(struct drm_device *dev,
			       size_t size,
			       struct sg_table *sg,
			       struct radeon_bo **pbo)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, sg, pbo);
	if (ret)
		return ret;
	bo = *pbo;
	bo->gem_base.driver_private = bo;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

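/*
 * Export a GEM object as a dma-buf.  The buffer is pinned into GTT
 * before export so the importer sees resident pages.
 */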
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *obj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (ret)
		return ERR_PTR(ret);

	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}

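/*
 * Import a dma-buf as a GEM object.  A self-import (a buffer exported
 * by this same device) just takes another reference on the existing
 * object; otherwise attach to the foreign buffer and wrap its sg table
 * in a new bo via radeon_prime_create().
 */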
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}