/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

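/*
 * Map the exported object into an importer's device: duplicate the object's
 * scatterlist so the importer gets an independent sg_table, then DMA-map
 * that copy for the importing device.
 */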
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

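/*
 * Undo i915_gem_map_dma_buf: DMA-unmap and free the copied sg_table, then
 * drop the pages pin taken at map time.
 */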
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

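/*
 * Whole-object kernel vmap for dma-buf importers; pin_map gives us a
 * cached (write-back) mapping of the backing pages.
 */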
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	void *addr;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	mutex_unlock(&dev->struct_mutex);

	return addr;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_object_unpin_map(obj);
	mutex_unlock(&dev->struct_mutex);
}

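/*
 * Per-page kmap interfaces are not implemented; importers are expected to
 * use vmap or mmap instead.
 */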
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

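/*
 * mmap of the dma-buf is forwarded to the shmem file backing the object,
 * so importers see the same pages as a native GEM CPU mmap.
 */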
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

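/*
 * begin/end_cpu_access bracket CPU access by the importer: move the object
 * into the CPU domain (flushing caches as needed) before access, and back
 * into the coherent GTT domain afterwards.
 */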
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

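/* dma-buf exporter callbacks backing i915 GEM objects. */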
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

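/*
 * Copy the object's outstanding requests into the dma-buf's reservation
 * object, so importers that wait on the resv see fences for GPU work that
 * was already queued when the buffer was exported.
 */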
static void export_fences(struct drm_i915_gem_object *obj,
			  struct dma_buf *dma_buf)
{
	struct reservation_object *resv = dma_buf->resv;
	struct drm_i915_gem_request *req;
	unsigned long active;
	int idx;

	active = __I915_BO_ACTIVE(obj);
	if (!active)
		return;

	/* Serialise with execbuf to prevent concurrent fence-loops */
	mutex_lock(&obj->base.dev->struct_mutex);

	/* Mark the object for future fences before racily adding old fences */
	obj->base.dma_buf = dma_buf;

	ww_mutex_lock(&resv->lock, NULL);

	for_each_active(active, idx) {
		req = i915_gem_active_get(&obj->last_read[idx],
					  &obj->base.dev->struct_mutex);
		if (!req)
			continue;

		if (reservation_object_reserve_shared(resv) == 0)
			reservation_object_add_shared_fence(resv, &req->fence);

		i915_gem_request_put(req);
	}

	req = i915_gem_active_get(&obj->last_write,
				  &obj->base.dev->struct_mutex);
	if (req) {
		reservation_object_add_excl_fence(resv, &req->fence);
		i915_gem_request_put(req);
	}

	ww_mutex_unlock(&resv->lock);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

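/*
 * Exporter entry point, reached via the PRIME handle-to-fd ioctl. As a
 * rough sketch (not part of this file), userspace typically obtains a
 * shareable fd for a GEM handle along these lines, assuming the standard
 * libdrm uapi:
 *
 *	struct drm_prime_handle args = { .handle = handle, .flags = DRM_CLOEXEC };
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 * and the resulting args.fd can then be imported elsewhere with
 * DRM_IOCTL_PRIME_FD_TO_HANDLE.
 */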
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dma_buf;

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	export_fences(obj, dma_buf);
	return dma_buf;
}

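/*
 * Backing-storage hooks for imported objects: pages come from mapping the
 * exporter's attachment rather than from our own shmem file.
 */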
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

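/*
 * Importer entry point (PRIME fd-to-handle): wrap a foreign dma-buf in a
 * GEM object, short-circuiting to the original object if the buffer was
 * exported from this same device.
 */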
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}