/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

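/* The dma-buf's private payload (buf->priv) is the exporting GEM object. */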
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

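/*
 * Exporter hook: called when an attached importer needs the backing pages.
 * We pin the object's pages, duplicate its scatterlist so that each importer
 * gets an independent mapping, and DMA-map the copy for the importer's
 * device; errors unwind the allocations and the pin in reverse order.
 */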
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

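/*
 * vmap pins the backing store and returns a write-back kernel mapping of the
 * whole object; vunmap releases that mapping and drops the pin.
 */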
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_unpin_map(obj);
}

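/* Atomic kmap access is not supported; these ops are deliberate no-op stubs. */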
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}
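
/*
 * Non-atomic kmap of a single page; only valid for shmem-backed objects,
 * and the mapped page stays pinned until the matching kunmap.
 */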
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct page *page;

	if (page_num >= obj->base.size >> PAGE_SHIFT)
		return NULL;

	if (!i915_gem_object_has_struct_page(obj))
		return NULL;

	if (i915_gem_object_pin_pages(obj))
		return NULL;

	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
	page = i915_gem_object_get_page(obj, page_num);
	if (IS_ERR(page))
		goto err_unpin;

	return kmap(page);

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	kunmap(virt_to_page(addr));
	i915_gem_object_unpin_pages(obj);
}

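/*
 * mmap is delegated to the shmem file backing the object: once the real mmap
 * succeeds, vma->vm_file is swapped over to the backing file so the vma pins
 * that file rather than the dma-buf file.
 */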
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

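/*
 * begin_cpu_access flushes the object into the CPU domain before an importer
 * touches it with the CPU; end_cpu_access flushes it back out to the GTT
 * domain afterwards. Both pin the pages for the duration of the domain
 * change so they cannot be reaped concurrently.
 */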
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = i915_gem_dmabuf_kmap,
	.map_atomic = i915_gem_dmabuf_kmap_atomic,
	.unmap = i915_gem_dmabuf_kunmap,
	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

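/**
 * i915_gem_prime_export - export an i915 GEM object as a dma-buf
 * @dev: DRM device
 * @gem_obj: GEM object to export
 * @flags: flags for the new file (e.g. O_CLOEXEC)
 *
 * Called for the PRIME handle-to-fd ioctl. The object's reservation object
 * is shared with the dma-buf so that fences are visible to the exporter and
 * every importer alike.
 */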
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(dev, &exp_info);
}

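/*
 * Page ops for objects wrapped around a foreign dma-buf: the backing store
 * comes from mapping (and unmapping) the import attachment rather than from
 * shmem.
 */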
static struct sg_table *
i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	return dma_buf_map_attachment(obj->base.import_attach,
				      DMA_BIDIRECTIONAL);
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

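/**
 * i915_gem_prime_import - wrap a dma-buf in a GEM object for this device
 * @dev: DRM device
 * @dma_buf: dma-buf to import
 *
 * Called for the PRIME fd-to-handle ioctl. A dma-buf that we exported
 * ourselves short-circuits back to the original object; anything else is
 * attached and wrapped in a fresh GEM object that fetches its pages through
 * the attachment.
 */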
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(to_i915(dev));
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;
	obj->resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif