/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export, the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in the handle_to_fd ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On import, the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is
 * destroyed, we remove the attachment and drop the reference to
 * the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private. PRIME will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle);

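/*
 * dma-buf .map_dma_buf callback: ask the exporting driver for the object's
 * sg table and map it for the attaching device, under the exporter's
 * struct_mutex.
 */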
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	mutex_lock(&obj->dev->struct_mutex);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR_OR_NULL(sgt))
		dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	mutex_unlock(&obj->dev->struct_mutex);
	return sgt;
}

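/* Undo drm_gem_map_dma_buf(): unmap the sg list and free the sg table. */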
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}

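/*
 * dma-buf .release hook: drop the export reference the dma_buf held on the
 * GEM object (see the lifetime overview above).
 */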
static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	if (obj->export_dma_buf == dma_buf) {
		/* drop the reference the export fd holds */
		obj->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(obj);
	}
}

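/* Kernel-virtual mapping of the whole buffer, forwarded to the driver. */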
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

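/* The per-page kmap interfaces are not implemented by these helpers. */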
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

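/* mmap of PRIME buffers is not supported through these helpers. */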
static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms
 * of five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table: produce a GEM object from another driver's
 *    scatter/gather table
 */

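/*
 * For illustration, a driver using these helpers might wire up its
 * struct drm_driver roughly as follows. This is only a sketch; the
 * mydrv_* callbacks are hypothetical placeholders for driver code:
 *
 *	static struct drm_driver mydrv_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	   = drm_gem_prime_export,
 *		.gem_prime_import	   = drm_gem_prime_import,
 *		.gem_prime_pin		   = mydrv_gem_prime_pin,
 *		.gem_prime_get_sg_table	   = mydrv_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = mydrv_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = mydrv_gem_prime_vmap,
 *		.gem_prime_vunmap	   = mydrv_gem_prime_vunmap,
 *	};
 */

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * Pins the object through the optional @gem_prime_pin driver callback and
 * wraps it in a dma-buf using the helper dma_buf_ops defined above.
 */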
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	if (dev->driver->gem_prime_pin) {
		int ret = dev->driver->gem_prime_pin(obj);
		if (ret)
			return ERR_PTR(ret);
	}
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, 0600);
}
EXPORT_SYMBOL(drm_gem_prime_export);

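/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: drm_device to export from
 * @file_priv: drm file-private structure
 * @handle: GEM buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * Looks up the GEM object for @handle, exports it as a dma-buf (reusing an
 * existing export or import where possible) and returns an fd for it in
 * @prime_fd.
 */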
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd)
{
	struct drm_gem_object *obj;
	struct dma_buf *buf;
	int ret = 0;
	struct dma_buf *dmabuf;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	mutex_lock(&file_priv->prime.lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		goto out_have_obj;
	}

	if (obj->export_dma_buf) {
		dmabuf = obj->export_dma_buf;
		goto out_have_obj;
	}

	buf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(buf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(buf);
		goto out;
	}
	obj->export_dma_buf = buf;

	/* if we've exported this buffer then cheat and add it to the import
	 * list so we get the correct handle back
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       obj->export_dma_buf, handle);
	if (ret)
		goto out;

	*prime_fd = dma_buf_fd(buf, flags);
	mutex_unlock(&file_priv->prime.lock);
	return 0;

out_have_obj:
	get_dma_buf(dmabuf);
	*prime_fd = dma_buf_fd(dmabuf, flags);
out:
	drm_gem_object_unreference_unlocked(obj);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

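/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * Attaches to the dma-buf, maps it and hands the resulting sg table to the
 * driver's @gem_prime_import_sg_table callback; self-imports are
 * short-circuited to the original GEM object.
 */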
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		/* a NULL sg table must not be turned into ERR_PTR(0) */
		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

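/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * Imports the dma-buf behind @prime_fd, reusing an existing handle if this
 * file has seen the buffer before, and returns a GEM handle for it.
 */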
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (!ret)
		goto out_put;

	/* never seen this one, need to import */
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_put;
	}

	ret = drm_gem_handle_create(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);
	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_object_handle_unreference_unlocked(obj);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: array of pages to convert
 * @nr_pages: number of pages in the array
 *
 * This helper creates an sg table object from a set of pages. The driver
 * is responsible for mapping the pages into the importer's address space.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto out;

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into page/addr arrays
 * @sgt: scatter-gather table to convert
 * @pages: array to store the page pointers in
 * @addrs: optional array to store the dma bus address of each page in
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, offset;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		offset = sg->offset;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/* helper function to clean up a GEM/PRIME object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

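/* Set up the per-file PRIME bookkeeping: the dma-buf/handle list and its lock. */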
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

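/* Tear down the per-file PRIME bookkeeping when the drm file is closed. */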
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		list_del(&member->entry);
		kfree(member);
	}
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);

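/* Remember the dma_buf <-> handle mapping; takes a reference on the dma_buf. */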
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

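/* Find the handle this file already has for @dma_buf, if any. */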
int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				struct dma_buf *dma_buf, uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(drm_prime_lookup_buf_handle);

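/* Drop the dma_buf <-> handle mapping and the reference taken when it was added. */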
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
				 struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	mutex_lock(&prime_fpriv->lock);
	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
	mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_buf_handle);