/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
	} while (0)

struct vb2_dma_sg_conf {
	struct device *dev;
};

struct vb2_dma_sg_buf {
	struct device *dev;
	void *vaddr;
	struct page **pages;
	int offset;
	enum dma_data_direction dma_dir;
	struct sg_table sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table *dma_sgt;
	size_t size;
	unsigned int num_pages;
	atomic_t refcount;
	struct vb2_vmarea_handler handler;
	struct vm_area_struct *vma;

	struct dma_buf_attachment *db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

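/*
 * Allocate the buf->num_pages pages backing an MMAP buffer.  The highest
 * page order that still fits the remaining size is tried first, falling
 * back to lower orders (down to single pages) when allocation fails;
 * higher-order blocks are then split with split_page() so buf->pages[]
 * always holds individual page pointers, while the resulting scatterlist
 * still benefits from physically contiguous chunks when memory is not
 * fragmented.
 */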
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

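/*
 * MMAP memory model: allocate the pages, build an sg_table covering them
 * and map it for DMA.  DMA_ATTR_SKIP_CPU_SYNC is used because the CPU
 * cache is only synchronised later, from the prepare()/finish() memops.
 * A reference to the device is taken so it cannot go away while the
 * buffer is in use.
 */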
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

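/*
 * prepare()/finish() do the CPU cache maintenance that was skipped at map
 * time (DMA_ATTR_SKIP_CPU_SYNC): prepare() syncs the buffer towards the
 * device before DMA, finish() syncs it back to the CPU afterwards.  For
 * buffers imported through DMABUF this is left to the exporter.
 */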
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

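/*
 * USERPTR memory model: pin the user pages backing [vaddr, vaddr + size)
 * and build an sg_table from them.  For VM_IO/VM_PFNMAP vmas the pages
 * are resolved with follow_pfn() instead of get_user_pages(), and in that
 * case no page references are taken (nor dropped on release).
 */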
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);
	up_read(&current->mm->mmap_sem);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	down_read(&current->mm->mmap_sem);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	up_read(&current->mm->mmap_sem);
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	down_read(&current->mm->mmap_sem);
	vb2_put_vma(buf->vma);
	up_read(&current->mm->mmap_sem);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);


	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

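/*
 * map_dma_buf: hand the importer a mapped copy of the scatterlist.  The
 * per-attachment table is reused as long as the requested direction
 * matches the cached one; otherwise the previous mapping is dropped and
 * the list is remapped with the new direction.
 */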
static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

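/*
 * DMABUF importer side: these callbacks only deal with buffers created by
 * vb2_dma_sg_attach_dmabuf().  map_dmabuf() obtains the scatterlist from
 * the exporter via dma_buf_map_attachment() and stores it in buf->dma_sgt,
 * which is also what the pinned/unpinned sanity checks below rely on.
 */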
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.prepare = vb2_dma_sg_prepare,
	.finish = vb2_dma_sg_finish,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.get_dmabuf = vb2_dma_sg_get_dmabuf,
	.map_dmabuf = vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

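/*
 * Minimal usage sketch (illustrative only; "my_pdev" and the vb2_queue
 * setup are assumptions about the calling driver, not part of this file).
 * The context returned here is typically handed back to vb2 from the
 * driver's queue_setup() callback through the alloc_ctxs[] array:
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	alloc_ctx = vb2_dma_sg_init_ctx(&my_pdev->dev);
 *	if (IS_ERR(alloc_ctx))
 *		return PTR_ERR(alloc_ctx);
 *	...
 *	vb2_dma_sg_cleanup_ctx(alloc_ctx);
 */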
void *vb2_dma_sg_init_ctx(struct device *dev)
{
	struct vb2_dma_sg_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");