/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

28struct vb2_dc_buf {
Tomasz Stanislawski72f86bf2012-06-14 10:37:40 -030029 struct device *dev;
Pawel Osciak1a758d42010-10-11 10:59:36 -030030 void *vaddr;
Pawel Osciak1a758d42010-10-11 10:59:36 -030031 unsigned long size;
Laurent Pinchart40d8b762012-06-14 10:37:41 -030032 dma_addr_t dma_addr;
Tomasz Stanislawskie15dab72012-06-14 10:37:42 -030033 enum dma_data_direction dma_dir;
34 struct sg_table *dma_sgt;
Laurent Pinchart40d8b762012-06-14 10:37:41 -030035
36 /* MMAP related */
Pawel Osciak1a758d42010-10-11 10:59:36 -030037 struct vb2_vmarea_handler handler;
Laurent Pinchart40d8b762012-06-14 10:37:41 -030038 atomic_t refcount;
Tomasz Stanislawski9ef2cbe2012-06-14 11:32:25 -030039 struct sg_table *sgt_base;
Laurent Pinchart40d8b762012-06-14 10:37:41 -030040
41 /* USERPTR related */
42 struct vm_area_struct *vma;
Sumit Semwal8c417d02012-06-14 10:37:45 -030043
44 /* DMABUF related */
45 struct dma_buf_attachment *db_attach;
Pawel Osciak1a758d42010-10-11 10:59:36 -030046};
47
/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

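/*
 * Invoke @cb on every page backing @sgt. This walks orig_nents (the
 * CPU-side entries) rather than nents, so it is safe to use on tables
 * that are not currently DMA-mapped.
 */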
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

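/*
 * Return the length of the initial bus-address-contiguous run of @sgt.
 * Callers compare this against the buffer size to decide whether the
 * DMA mapping can be used as a single contiguous chunk.
 */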
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

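/*
 * The cookie of a dma-contig buffer is a pointer to its bus address;
 * drivers fetch it through vb2_dma_contig_plane_dma_addr().
 */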
static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

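/*
 * Cache synchronization around hardware access. Only USERPTR buffers
 * own a dma_sgt to sync here: MMAP buffers use coherent memory and
 * DMABUF buffers are synced by their exporter (see the checks below).
 */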
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

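/*
 * Drop one reference to an MMAP buffer and free it (including a cached
 * export scatterlist, if any) once the last user is gone.
 */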
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

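/*
 * MMAP buffer allocation: one dma_alloc_coherent() chunk plus the vma
 * handler that refcounts mappings created by vb2_dc_mmap().
 */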
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

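/*
 * Map the whole buffer into userspace. vb2_common_vm_ops together with
 * the handler set up in vb2_dc_alloc() keep the buffer alive for as
 * long as the mapping exists.
 */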
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

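/*
 * Each attachment receives its own copy of the exporter's scatterlist,
 * since one sg_table cannot be DMA-mapped by several devices at once.
 * The copy is mapped lazily in vb2_dc_dmabuf_ops_map().
 */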
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

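/*
 * DMA-map the attachment's scatterlist for the importing device. The
 * mapping is cached in the attachment, so mapping again in the same
 * direction is a no-op and a direction change triggers a remap.
 */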
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing the dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return the previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* map the scatterlist for the client with the new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

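/*
 * Ask the DMA API for a scatterlist describing the pages behind the
 * coherent allocation; the result is cached in buf->sgt_base and copied
 * for every attachment.
 */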
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

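/*
 * Export an MMAP buffer as a dma-buf. The export takes a reference on
 * the buffer, which vb2_dc_dmabuf_ops_release() drops again.
 */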
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

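/*
 * Pin the user pages backing @n_pages pages from @start. For VM_IO and
 * VM_PFNMAP vmas the pfns are resolved directly and no page references
 * are taken, which is why the error/release paths check vma_is_io().
 */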
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

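/*
 * USERPTR support: pin the user memory, build and DMA-map a scatterlist
 * for it, and verify that the mapping is contiguous in the DMA address
 * space. Unaligned or zero-sized buffers are rejected up front.
 */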
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

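/*
 * Pin an attached dmabuf for I/O and check that the exporter provided a
 * DMA-contiguous range at least as large as the vb2 buffer.
 */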
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check that the dmabuf is big enough to store a contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

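/*
 * Import a dmabuf: only the attachment is created here; the actual
 * pinning and address lookup happen later in vb2_dc_map_dmabuf().
 */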
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");