/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

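/*
 * Per-buffer state for the dma-sg allocator: the kernel mapping (if any),
 * the array of allocated or pinned pages, the scatter-gather table built
 * over them, and the refcount/vma bookkeeping shared with the mmap helpers.
 */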
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct vm_area_struct		*vma;
};

static void vb2_dma_sg_put(void *buf_priv);

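/*
 * Fill buf->pages with buf->size worth of pages. Try the largest order
 * that still fits, fall back to smaller orders when allocation fails, and
 * split each allocation into individual pages so the array can later be
 * freed page by page.
 */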
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

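/*
 * MMAP allocation: allocate the page array, build a scatter-gather table
 * over it and set up the refcount handler used by the mmap vm_ops.
 */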
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

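/*
 * Drop one reference; on the final put release the kernel mapping, the
 * scatter-gather table, the pages and the bookkeeping structures.
 */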
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(&buf->sg_table);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

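/*
 * USERPTR path: look up the vma backing the user buffer, collect its pages
 * (via follow_pfn() for VM_IO/VM_PFNMAP mappings, get_user_pages()
 * otherwise) and build a scatter-gather table over them.
 */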
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

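/*
 * Return a kernel virtual address for the buffer, creating the
 * vm_map_ram() mapping on first use.
 */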
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

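/*
 * Map the buffer into a userspace vma page by page and hook up the common
 * vm_ops so the mapping holds a reference on the buffer.
 */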
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory failed, error: %d\n",
			       ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

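/*
 * The allocator cookie handed back to drivers is the scatter-gather table
 * itself.
 */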
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");