/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	unsigned long size;
	int is_userptr;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						    \
	if (unlikely((is) != (should))) {				    \
		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
		BUG();							    \
	}

static void
videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
		mutex_lock(&q->vb_lock);

		/* We need to cancel streams first, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if the kernel
				 * has allocated memory and that memory has
				 * been mmapped.  In this case the memory must
				 * be freed here in order to tear down the
				 * mapping.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* dma_free_coherent() is not atomic, so it
				 * can't be called with IRQs disabled
				 */
				dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		mutex_unlock(&q->vb_lock);
	}
}

static const struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->is_userptr = 0;
	mem->dma_handle = 0;
	mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	int ret;

	mem->size = PAGE_ALIGN(vb->size);
	mem->is_userptr = 0;
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, vb->baddr);
	if (!vma)
		goto out_up;

	if ((vb->baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = vb->baddr;

	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = this_pfn << PAGE_SHIFT;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

	if (!ret)
		mem->is_userptr = 1;

 out_up:
	up_read(&current->mm->mmap_sem);

	return ret;
}

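/*
 * Usage note: follow_pfn() only succeeds on pfn-mapped VMAs (VM_IO or
 * VM_PFNMAP), so a USERPTR buffer must point into memory that was itself
 * mmap()ed from a device (a framebuffer, for example), not into malloc()ed
 * or anonymous memory.  A rough user-space sketch of queueing such a
 * buffer (video_fd, fb_mem and fb_size are placeholder names):
 *
 *	struct v4l2_buffer buf;
 *
 *	memset(&buf, 0, sizeof(buf));
 *	buf.type      = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	buf.memory    = V4L2_MEMORY_USERPTR;
 *	buf.index     = 0;
 *	buf.m.userptr = (unsigned long)fb_mem;	/* pfn-mapped region */
 *	buf.length    = fb_size;
 *	ioctl(video_fd, VIDIOC_QBUF, &buf);
 */
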
static void *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		mem = vb->priv = ((char *)vb) + size;
		mem->magic = MAGIC_DC_MEM;
	}

	return vb;
}

static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		mem->size = PAGE_ALIGN(vb->size);
		mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
						&mem->dma_handle, GFP_KERNEL);
		if (!mem->vaddr) {
			dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
				mem->size);
			return -ENOMEM;
		}

		dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
			mem->vaddr, mem->size);
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int __videobuf_mmap_free(struct videobuf_queue *q)
{
	unsigned int i;

	dev_dbg(q->dev, "%s\n", __func__);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (q->bufs[i] && q->bufs[i]->map)
			return -EBUSY;
	}

	return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	unsigned int first;
	int retval;
	unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;

	dev_dbg(q->dev, "%s\n", __func__);
	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (!q->bufs[first])
			continue;

		if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
			continue;
		if (q->bufs[first]->boff == offset)
			break;
	}
	if (VIDEO_MAX_FRAME == first) {
		dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
			offset);
		return -EINVAL;
	}

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	q->bufs[first]->map = map;
	map->start = vma->vm_start;
	map->end = vma->vm_end;
	map->q = q;

	q->bufs[first]->baddr = vma->vm_start;

	mem = q->bufs[first]->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
	mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
					&mem->dma_handle, GFP_KERNEL);
	if (!mem->vaddr) {
		dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
			mem->size);
		goto error;
	}
	dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
		mem->vaddr, mem->size);

	/* Try to remap memory */

	size = vma->vm_end - vma->vm_start;
	size = (size < mem->size) ? size : mem->size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	retval = remap_pfn_range(vma, vma->vm_start,
				 mem->dma_handle >> PAGE_SHIFT,
				 size, vma->vm_page_prot);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int) q->bufs[first]->bsize,
		vma->vm_pgoff, first);

	videobuf_vm_open(vma);

	return 0;

error:
	/* undo the partial setup so the buffer can be mapped again later */
	q->bufs[first]->map = NULL;
	q->bufs[first]->baddr = 0;
	kfree(map);
	return -ENOMEM;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   char __user *data, size_t count,
				   int nonblocking)
{
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
	void *vaddr;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
	BUG_ON(!mem->vaddr);

	/* copy to userspace */
	if (count > q->read_buf->size - q->read_off)
		count = q->read_buf->size - q->read_off;

	vaddr = mem->vaddr;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc;
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc = (unsigned int *)mem->vaddr;
		fc += (q->read_buf->size >> 2) - 1;
		*fc = q->read_buf->field_count >> 1;
		dev_dbg(q->dev, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,

	.alloc		= __videobuf_alloc,
	.iolock		= __videobuf_iolock,
	.mmap_free	= __videobuf_mmap_free,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.video_copy_to_user = __videobuf_copy_to_user,
	.copy_stream	= __videobuf_copy_stream,
	.vmalloc	= __videobuf_to_vmalloc,
};

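/*
 * These per-queue-type operations are not called directly by drivers; the
 * generic videobuf core dispatches to them through the int_ops pointer set
 * up by videobuf_queue_core_init() below.  Drivers normally only use the
 * exported helpers that follow.
 */
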
431void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
Jonathan Corbet38a54f32009-11-17 19:43:41 -0300432 const struct videobuf_queue_ops *ops,
Magnus Damm2cc45cf2008-07-16 21:33:39 -0300433 struct device *dev,
434 spinlock_t *irqlock,
435 enum v4l2_buf_type type,
436 enum v4l2_field field,
437 unsigned int msize,
438 void *priv)
439{
440 videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
441 priv, &qops);
442}
443EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
444
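/*
 * A minimal usage sketch, assuming a hypothetical driver "foo" that
 * provides the usual videobuf_queue_ops callbacks (foo_buf_setup,
 * foo_buf_prepare, foo_buf_queue, foo_buf_release), a per-buffer struct
 * foo_buffer embedding struct videobuf_buffer as its first member, and a
 * per-device spinlock "slock":
 *
 *	static struct videobuf_queue_ops foo_video_qops = {
 *		.buf_setup	= foo_buf_setup,
 *		.buf_prepare	= foo_buf_prepare,
 *		.buf_queue	= foo_buf_queue,
 *		.buf_release	= foo_buf_release,
 *	};
 *
 *	videobuf_queue_dma_contig_init(&foo->vb_vidq, &foo_video_qops,
 *				       foo->dev, &foo->slock,
 *				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				       V4L2_FIELD_INTERLACED,
 *				       sizeof(struct foo_buffer), foo);
 */
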
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
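
/*
 * A driver typically calls videobuf_to_dma_contig() from its buf_prepare()
 * or buf_queue() callback to obtain the bus address that gets programmed
 * into the capture hardware.  A hedged sketch (foo_dev, foo->base and
 * FOO_DMA_ADDR are hypothetical names):
 *
 *	static void foo_buf_queue(struct videobuf_queue *vq,
 *				  struct videobuf_buffer *vb)
 *	{
 *		struct foo_dev *foo = vq->priv_data;
 *
 *		writel(videobuf_to_dma_contig(vb), foo->base + FOO_DMA_ADDR);
 *		vb->state = VIDEOBUF_ACTIVE;
 *	}
 */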

void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise the mmapped region
	 * would be released while it is still needed.  In that case the
	 * memory release happens inside videobuf_vm_close().
	 * So memory is freed here only if it was allocated for the read()
	 * method.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
	mem->vaddr = NULL;
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
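
/*
 * A driver's buf_release() callback is the usual place to call
 * videobuf_dma_contig_free().  A hedged sketch:
 *
 *	static void foo_buf_release(struct videobuf_queue *vq,
 *				    struct videobuf_buffer *vb)
 *	{
 *		videobuf_dma_contig_free(vq, vb);
 *		vb->state = VIDEOBUF_NEEDS_INIT;
 *	}
 */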

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");