/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

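/*
 * Exporter callback: build an independent scatterlist for the importing
 * device and DMA-map it. The object's backing pages are pinned before
 * returning so they cannot be released while the mapping is live.
 */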
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_mutex_lock_interruptible(obj->base.dev);
        if (ret)
                return ERR_PTR(ret);

        ret = i915_gem_object_get_pages(obj);
        if (ret) {
                st = ERR_PTR(ret);
                goto out;
        }

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                st = ERR_PTR(-ENOMEM);
                goto out;
        }

        ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
        if (ret) {
                kfree(st);
                st = ERR_PTR(ret);
                goto out;
        }

        src = obj->pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                sg_free_table(st);
                kfree(st);
                st = ERR_PTR(-ENOMEM);
                goto out;
        }

        i915_gem_object_pin_pages(obj);

out:
        mutex_unlock(&obj->base.dev->struct_mutex);
        return st;
}

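/*
 * Exporter callback: undo i915_gem_map_dma_buf by DMA-unmapping and
 * freeing the duplicated scatterlist.
 */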
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
}

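/*
 * Map the entire object into kernel address space. The mapping is
 * refcounted via vmapping_count, so repeated vmap calls reuse the
 * existing mapping, and the backing pages stay pinned while one exists.
 */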
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
        struct sg_page_iter sg_iter;
        struct page **pages;
        int ret, i;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        if (obj->dma_buf_vmapping) {
                obj->vmapping_count++;
                goto out_unlock;
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto error;

        ret = -ENOMEM;

        pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
        if (pages == NULL)
                goto error;

        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
                pages[i++] = sg_page_iter_page(&sg_iter);

        obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
        drm_free_large(pages);

        if (!obj->dma_buf_vmapping)
                goto error;

        obj->vmapping_count = 1;
        i915_gem_object_pin_pages(obj);
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return obj->dma_buf_vmapping;

error:
        mutex_unlock(&dev->struct_mutex);
        return ERR_PTR(ret);
}

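/*
 * Drop one reference on the kernel mapping; the vmap and the page pin
 * are released when the last reference goes away.
 */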
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return;

        if (--obj->vmapping_count == 0) {
                vunmap(obj->dma_buf_vmapping);
                obj->dma_buf_vmapping = NULL;

                i915_gem_object_unpin_pages(obj);
        }
        mutex_unlock(&dev->struct_mutex);
}

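/*
 * Per-page kernel mappings are not implemented; the dma-buf core
 * expects these hooks to be present, so they are stubbed out and
 * kmap simply reports failure by returning NULL.
 */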
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

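/* CPU mmap of the exported buffer is not supported. */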
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

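/*
 * Move the object to the CPU domain before the importer touches it,
 * treating bidirectional and to-device transfers as CPU writes.
 */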
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
        int ret;
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ret = i915_gem_object_set_to_cpu_domain(obj, write);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

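/*
 * dma-buf operations for buffers exported by i915; release is handled
 * by the generic drm_gem_dmabuf_release, which drops the GEM reference
 * taken at export time.
 */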
static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = i915_gem_dmabuf_kmap,
        .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
        .kunmap = i915_gem_dmabuf_kunmap,
        .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
};

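/* PRIME export: wrap the GEM object in a dma_buf covering its full size. */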
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

        return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}

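/*
 * Backing-storage hooks for imported objects: pages come from the
 * foreign exporter through the attachment rather than from shmem, and
 * arrive already DMA-mapped (hence has_dma_mapping).
 */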
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *sg;

        sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg))
                return PTR_ERR(sg);

        obj->pages = sg;
        obj->has_dma_mapping = true;
        return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        dma_buf_unmap_attachment(obj->base.import_attach,
                                 obj->pages, DMA_BIDIRECTIONAL);
        obj->has_dma_mapping = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

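/*
 * PRIME import: turn a dma_buf into a GEM object. A buffer that we
 * exported ourselves is short-circuited to a reference on the original
 * object; a foreign buffer is attached and wrapped in a new GEM object
 * whose pages are fetched on demand through i915_gem_object_dmabuf_ops.
 */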
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf->priv;
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(&obj->base);
                        return &obj->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}