/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_conf {
	struct device		*dev;
};

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

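/*
 * Allocate the buffer as a set of physically contiguous chunks: try the
 * highest page order that still fits the remaining size, fall back to
 * smaller orders on allocation failure, and split each successful
 * high-order allocation into individual pages so that buf->pages[]
 * always holds page-sized entries. Fails with -ENOMEM only once even
 * order-0 allocations fail, after releasing everything allocated so far.
 */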
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

static void *vb2_dma_sg_alloc(void *alloc_ctx, const struct dma_attrs *dma_attrs,
			      unsigned long size, enum dma_data_direction dma_dir,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

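/*
 * Buffers are mapped with DMA_ATTR_SKIP_CPU_SYNC above, so cache
 * synchronization is deferred to the prepare()/finish() memops below:
 * prepare() syncs the buffer to the device before DMA, finish() syncs it
 * back to the CPU afterwards. DMABUF-imported buffers are skipped here
 * because the exporter is responsible for cache maintenance.
 */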
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);
	struct frame_vector *vec;

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

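/*
 * Each attachment carries its own copy of the exporter's scatterlist:
 * the same sg_table cannot be mapped for several importers at once, and
 * keeping the copy (together with its current dma_dir) lets ops_map
 * return a cached mapping when the requested direction is unchanged.
 */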
static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}
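
/*
 * Drivers normally reach this cookie through the vb2_dma_sg_plane_desc()
 * helper from videobuf2-dma-sg.h, which wraps vb2_plane_cookie() and
 * casts the result back to a struct sg_table. A minimal sketch of
 * buf_queue() usage (my_hw_program_dma is a hypothetical driver helper):
 *
 *	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 *
 *	my_hw_program_dma(dev, sgt->sgl, sgt->nents);
 */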

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.prepare = vb2_dma_sg_prepare,
	.finish = vb2_dma_sg_finish,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.get_dmabuf = vb2_dma_sg_get_dmabuf,
	.map_dmabuf = vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

void *vb2_dma_sg_init_ctx(struct device *dev)
{
	struct vb2_dma_sg_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);

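/*
 * A minimal usage sketch (illustrative, not part of this file): a driver
 * selects this allocator by pointing its vb2_queue at vb2_dma_sg_memops
 * and handing out an allocation context from its queue_setup() callback.
 * The my_dev name below is hypothetical.
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	my_dev->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_dev->alloc_ctx))
 *		return PTR_ERR(my_dev->alloc_ctx);
 *
 * In queue_setup(): alloc_ctxs[0] = my_dev->alloc_ctx;
 * On driver removal: vb2_dma_sg_cleanup_ctx(my_dev->alloc_ctx);
 */
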
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");