/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

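/*
 * struct vb2_vmalloc_buf - bookkeeping for one vmalloc-backed buffer
 * @vaddr:	kernel virtual address of the buffer (a vmalloc area, a vmap
 *		of pinned user pages, an ioremap of a contiguous PFN range,
 *		or the vmap of an attached dma-buf, depending on memory type)
 * @vec:	pinned user pages/PFNs, only used for USERPTR buffers
 * @dma_dir:	direction of the DMA transfer for this buffer
 * @size:	size of the buffer in bytes
 * @refcount:	users of the buffer (vb2, userspace mappings, exported dma-bufs)
 * @handler:	vm_area handler used to track mmap references
 * @dbuf:	dma-buf this buffer was imported from (DMABUF memory type only)
 */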
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction	dma_dir;
	unsigned long			size;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

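/*
 * MMAP: allocate a fresh buffer with vmalloc_user() so it can later be
 * handed to userspace with remap_vmalloc_range().  The allocation holds the
 * first reference; vb2_vmalloc_put() frees the buffer once the last
 * reference is dropped.
 */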
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	atomic_inc(&buf->refcount);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

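/*
 * USERPTR: pin the user memory backing [vaddr, vaddr + size) with
 * vb2_create_framevec().  Page-backed memory is mapped into the kernel with
 * vm_map_ram(); PFN-mapped memory without struct pages is accepted only if
 * it is physically contiguous and is then mapped with ioremap_nocache().
 */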
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

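/*
 * MMAP: hand the whole vmalloc area to userspace.  remap_vmalloc_range()
 * inserts the individual pages into the VMA; vb2_common_vm_ops then keeps
 * the buffer alive for as long as any userspace mapping exists.
 */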
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

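/*
 * Exporter side: on attach, build an sg_table with one entry per page of the
 * vmalloc area (vmalloc_to_page() on each PAGE_SIZE chunk).  The actual
 * dma_map_sg()/dma_unmap_sg() happens in the map/unmap callbacks below,
 * serialized by the dma-buf lock and cached per attachment direction.
 */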
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

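/*
 * kmap/vmap for the exported dma-buf are trivial: the whole buffer already
 * has a kernel mapping, so just return (offsets into) buf->vaddr.
 */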
static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

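/*
 * Export a buffer as a dma-buf.  The exported dma-buf takes its own reference
 * on the vb2 buffer, which is dropped again in the .release callback above.
 */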
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

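/*
 * Importer side: an attached dma-buf is only vmapped to obtain a kernel
 * virtual address for the plane; dma_buf_vmap()/dma_buf_vunmap() are called
 * from map_dmabuf/unmap_dmabuf, and any remaining mapping is dropped again
 * on detach.
 */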
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

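/*
 * A minimal usage sketch (illustrative only, not part of this file): a driver
 * that does not need physically contiguous buffers points its vb2_queue at
 * these ops before calling vb2_queue_init().  "my_driver_qops" below stands
 * in for the driver's own queue callbacks and is purely hypothetical.
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->ops = &my_driver_qops;
 *	ret = vb2_queue_init(q);
 */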
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");