/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device			*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
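
/*
 * A worked example of the check above (illustrative only, not part of the
 * original file): after dma_map_sg() an IOMMU may have merged pages into
 * fewer, larger DMA segments. For a hypothetical mapped table
 *
 *	segment 0: dma_addr 0x10000000, len 0x8000
 *	segment 1: dma_addr 0x10008000, len 0x4000
 *	segment 2: dma_addr 0x20000000, len 0x1000
 *
 * segment 1 starts exactly where segment 0 ends, extending the contiguous
 * run to 0xc000 bytes; segment 2 does not, so the loop stops and
 * vb2_dc_get_contiguous_size() returns 0xc000. Callers compare this value
 * against the buffer size to decide whether the mapping is usable by a
 * device that requires contiguous memory.
 */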

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* the DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* the DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dev = dev;
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("no buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}
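
/*
 * Illustrative sketch (not part of the original file): userspace reaches
 * vb2_dc_mmap() through the V4L2 mmap() path. A typical sequence, under
 * the usual V4L2 conventions, looks roughly like:
 *
 *	struct v4l2_buffer b = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				 .memory = V4L2_MEMORY_MMAP, .index = 0 };
 *	ioctl(fd, VIDIOC_QUERYBUF, &b);
 *	void *p = mmap(NULL, b.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, b.m.offset);
 *
 * The vb2 core looks the buffer up by that offset and calls this
 * allocator's mmap handler, which resets vm_pgoff and maps the whole
 * buffer.
 */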

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check if the dmabuf is big enough to store a contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu bytes\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create an attachment for the dmabuf with the user's device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
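
/*
 * Illustrative lifecycle sketch (not part of the original file): for a
 * DMABUF buffer the vb2 core drives the four callbacks above in pairs.
 * Assuming a struct dma_buf *dbuf imported from userspace, the sequence
 * is roughly:
 *
 *	mem = vb2_dc_attach_dmabuf(ctx, dbuf, size, write);  // once
 *	vb2_dc_map_dmabuf(mem);     // before each use: pin and verify that
 *				    // the mapping is contiguous
 *	// ...device DMA runs using the buffer's dma_addr...
 *	vb2_dc_unmap_dmabuf(mem);   // after each use
 *	vb2_dc_detach_dmabuf(mem);  // when the buffer is released
 *
 * Attach/detach bracket the buffer's lifetime, while map/unmap bracket
 * each hardware operation.
 */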

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
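
/*
 * Illustrative driver usage (not part of the original file; my_dev and the
 * probe/remove wiring are assumptions): a typical driver creates one
 * allocator context per device and plugs the memops into its vb2_queue:
 *
 *	// in the driver's probe():
 *	my_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_dev->alloc_ctx))
 *		return PTR_ERR(my_dev->alloc_ctx);
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 *
 *	// in the driver's remove():
 *	vb2_dma_contig_cleanup_ctx(my_dev->alloc_ctx);
 *
 * The context is then handed back to this allocator as the alloc_ctx
 * argument of the callbacks above, typically via the alloc_ctxs array
 * filled in by the driver's queue_setup callback.
 */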

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");