/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

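/* Recover the GEM object stashed in the dma-buf's private pointer at export time. */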
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

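/*
 * Pin the object's backing pages and hand the importer an independent,
 * DMA-mapped copy of the scatterlist. Copying the sg_table means several
 * attachments can map the same object concurrently.
 */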
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

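/*
 * Reverse of i915_gem_map_dma_buf: unmap and free the per-attachment
 * scatterlist, then drop the pin taken on the backing pages.
 */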
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

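/*
 * Map the whole object into kernel address space. The mapping is cached on
 * the object and refcounted through vmapping_count, so concurrent users
 * share a single vmap().
 */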
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

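/*
 * Drop a vmap reference; the kernel mapping and the page pin are released
 * only when the last vmap user has gone.
 */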
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

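/*
 * Per-page kmap access is not implemented for i915 dma-bufs; the stubs
 * below return NULL to signal that the interface is unsupported.
 */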
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

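/*
 * mmap is forwarded to the shmem file backing the object; the vma's file
 * reference is swapped over to that file so the mapping keeps the backing
 * store alive.
 */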
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

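/*
 * Prepare for CPU access by moving the object to the CPU domain; the access
 * counts as a write for DMA_TO_DEVICE and DMA_BIDIRECTIONAL transfers.
 */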
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

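/*
 * CPU access has finished: flush the object back to the GTT domain. This
 * hook cannot return an error, so the wait is made non-interruptible and a
 * failure is only reported via DRM_ERROR.
 */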
static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool was_interruptible;
	int ret;

	mutex_lock(&dev->struct_mutex);
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);

	dev_priv->mm.interruptible = was_interruptible;
	mutex_unlock(&dev->struct_mutex);

	if (unlikely(ret))
		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
}

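/* dma-buf vtable used for every buffer exported by i915. */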
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

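/*
 * Export a GEM object as a dma-buf. Object types that implement the
 * dmabuf_export hook get a chance to veto the export before the buffer
 * is published.
 */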
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

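/*
 * get_pages/put_pages backends for imported objects: the backing pages come
 * from the foreign exporter through the dma-buf attachment rather than from
 * our own shmem filp.
 */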
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

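/*
 * Import a dma-buf as a GEM object. A buffer we exported ourselves is
 * short-circuited back to the original object; anything foreign is wrapped
 * in a new GEM object that fetches its pages through the attachment.
 */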
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}