/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct sg_table			sg_table;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
};

static void vb2_dma_sg_put(void *buf_priv);

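/*
 * Fill buf->pages[] "compacted": try the highest page order that still
 * fits the remaining size, fall back to ever smaller orders on allocation
 * failure, and split each successful higher-order allocation into
 * individual pages so they can later be mapped and freed one by one.
 */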
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

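/*
 * MMAP buffer allocation: size is already page-aligned by the vb2 core.
 * Allocate the backing pages, then build a struct sg_table describing
 * them, which is what drivers can later map for DMA.
 */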
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, gfp_flags);
	if (ret)
		goto fail_table_alloc;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

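/*
 * Drop one reference; on the final put, tear down any kernel mapping,
 * free the scatterlist table and release every page back to the allocator.
 */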
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(&buf->sg_table);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

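/*
 * USERPTR support: pin the pages spanning [vaddr, vaddr + size) with
 * get_user_pages() and wrap them in an sg_table.  The buffer may start at
 * any offset within its first page, so the offset is recorded and passed
 * on to sg_alloc_table_from_pages().
 */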
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		num_pages_from_user, buf->num_pages);
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	kfree(buf);
}

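/*
 * Map the buffer into the kernel's address space on first use; the
 * mapping is cached in buf->vaddr and torn down with vm_unmap_ram()
 * when the buffer is released.
 */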
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

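/*
 * mmap() support: since the buffer is not physically contiguous, each
 * page is inserted into the user VMA individually with vm_insert_page().
 * The common vb2 vm_area handler then keeps the buffer alive for as long
 * as the mapping exists.
 */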
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

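/*
 * The "cookie" exposed to drivers is the sg_table itself; a driver would
 * typically fetch it through the vb2_dma_sg_plane_desc() helper from
 * <media/videobuf2-dma-sg.h> and then map it for DMA with dma_map_sg().
 */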
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_table;
}

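/*
 * A minimal usage sketch (assuming a driver's vb2_queue named "q"):
 * select this allocator before calling vb2_queue_init():
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 */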
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");