/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

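/*
 * PRIME (dma-buf) buffer sharing for nouveau: exporting nouveau GEM
 * objects as dma-bufs that other devices can import, and wrapping
 * foreign dma-bufs in GEM objects so nouveau userspace can use them.
 *
 * Userspace drives this through the PRIME ioctls; a rough sketch of
 * the flow, using the libdrm wrappers from xf86drm.h (not part of
 * this file):
 *
 *	drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd);
 *	// pass prime_fd to another process or driver, then:
 *	drmPrimeFDToHandle(other_fd, prime_fd, &other_handle);
 */
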
#include <linux/dma-buf.h>

#include <drm/drmP.h>

#include "nouveau_drm.h"
#include "nouveau_gem.h"

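/*
 * dma-buf attachment map: wrap the TTM pages backing the exported
 * buffer in an sg_table and DMA-map it for the importing device.
 * struct_mutex is held so the page array stays stable while we walk it.
 */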
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

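/* Inverse of nouveau_gem_map_dma_buf(): DMA-unmap and free the sg_table. */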
static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

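/*
 * dma-buf release: the last user of the exported buffer is gone.  Drop
 * the GEM reference the PRIME export path took when it published this
 * dma-buf through export_dma_buf.
 */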
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;

	if (nvbo->gem->export_dma_buf == dma_buf) {
		nvbo->gem->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

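/*
 * CPU access through kmap is not supported; these stubs exist only
 * because dma_buf_ops requires the hooks to be present.
 */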
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

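/* Direct mmap of the dma-buf by importers is not supported either. */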
static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

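/*
 * vmap: give the importer a kernel virtual mapping of the whole buffer.
 * The mapping is created on first use via ttm_bo_kmap() and refcounted
 * through vmapping_count, so nested vmap/vunmap pairs share it.
 */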
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (nvbo->vmapping_count) {
		nvbo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	nvbo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return nvbo->dma_buf_vmap.virtual;
}

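/* vunmap: drop a vmap reference, tearing the mapping down on the last one. */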
static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;

	mutex_lock(&dev->struct_mutex);
	nvbo->vmapping_count--;
	if (nvbo->vmapping_count == 0)
		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
	mutex_unlock(&dev->struct_mutex);
}

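/* The dma-buf operations nouveau exports its buffers with. */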
static const struct dma_buf_ops nouveau_dmabuf_ops = {
	.map_dma_buf = nouveau_gem_map_dma_buf,
	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
	.release = nouveau_gem_dmabuf_release,
	.kmap = nouveau_gem_kmap,
	.kmap_atomic = nouveau_gem_kmap_atomic,
	.kunmap = nouveau_gem_kunmap,
	.kunmap_atomic = nouveau_gem_kunmap_atomic,
	.mmap = nouveau_gem_prime_mmap,
	.vmap = nouveau_gem_prime_vmap,
	.vunmap = nouveau_gem_prime_vunmap,
};

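/*
 * Build a nouveau_bo (and its GEM wrapper) around an imported sg_table.
 * The BO is restricted to GART, since the backing pages belong to the
 * exporting device and cannot migrate into VRAM.
 */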
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = TTM_PL_FLAG_TT;
	int ret;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0, sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* Imported buffers may only ever live in GART. */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}

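/*
 * Export: pin the buffer into GTT so its pages cannot move while the
 * dma-buf is shared, then hand it to the dma-buf core.
 */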
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
					 struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}

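/*
 * Import: if the dma-buf is one of our own on the same device, just
 * take another reference to the existing GEM object.  Otherwise attach
 * to the foreign buffer, map it, and wrap it in a new GART-backed BO.
 */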
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				dma_buf_put(dma_buf);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}