/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device			*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/


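/*
 * Invoke @cb on every page backing the scatterlist; the page count of each
 * entry is derived from the entry's offset and length.
 */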
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

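/*
 * Compute how many bytes at the start of the mapped scatterlist are
 * DMA-contiguous, i.e. the size of the run beginning at
 * sg_dma_address(sgt->sgl) with no gaps between consecutive entries.
 */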
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

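/*
 * prepare/finish sync the buffer for device and CPU access around hardware
 * operations; DMABUF-attached buffers are skipped because the exporter is
 * responsible for cache maintenance.
 */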
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dev = dev;
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

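/*
 * Each attachment gets its own copy of the exporter's scatter list, since the
 * same scatter list cannot be mapped by several importers at once; the copy
 * starts out unmapped (dir == DMA_NONE).
 */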
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

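/*
 * Map the attachment's scatter list for the importing device. A previous
 * mapping with the same direction is reused; a mapping with a different
 * direction is torn down and redone.
 */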
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

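/*
 * Build an sg_table describing the coherent allocation via dma_get_sgtable(),
 * so that the buffer can be handed out to DMABUF importers.
 */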
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

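/*
 * Pin n_pages user pages starting at @start. For VM_IO/VM_PFNMAP areas the
 * PFNs are resolved with follow_pfn() and no page references are taken;
 * otherwise get_user_pages() pins them, and a partial pin is undone on
 * failure.
 */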
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

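/*
 * USERPTR setup: pin the pages of a userspace buffer, build an sg_table from
 * them, map it for DMA and verify that the mapping is contiguous and covers
 * the requested size.
 */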
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

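/*
 * Map the attached DMABUF for DMA and verify that the exporter provided a
 * chunk that is contiguous and large enough for this buffer.
 */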
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

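/*
 * Attach to an imported DMABUF; the actual DMA mapping is deferred to
 * vb2_dc_map_dmabuf().
 */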
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");