/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */
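
/*
 * A minimal sketch of typical driver usage; the my_* names below are
 * hypothetical and not part of this API:
 *
 *	videobuf_queue_dma_contig_init(&my_dev->vb_queue, &my_vb_ops,
 *				       &pdev->dev, &my_dev->irq_lock,
 *				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				       V4L2_FIELD_NONE,
 *				       sizeof(struct my_buffer), my_dev);
 *
 * Buffers managed by this queue are physically contiguous, so a driver
 * can program the bus address returned by videobuf_to_dma_contig()
 * directly into a DMA engine.
 */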

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			pr_err("magic mismatch: %x expected %x\n",	\
			       (is), (should));				\
			BUG();						\
		}							\
	} while (0)

static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
		mutex_lock(&q->vb_lock);
		/* We need to cancel any active streams first, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if the
				   kernel has allocated memory and this
				   memory is mmapped. In that case the
				   memory must be freed here so that the
				   mapping can be released.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* dma_free_coherent() is not atomic -
				   it can't be called with IRQs
				   disabled
				 */
				dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		mutex_unlock(&q->vb_lock);
	}
}

static struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};

static void *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

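	/* Allocate the videobuf_buffer and its private
	   videobuf_dma_contig_memory in one allocation; vb->priv points
	   just past the buffer structure. */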
	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		mem = vb->priv = ((char *)vb) + size;
		mem->magic = MAGIC_DC_MEM;
	}

	return vb;
}

static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* The only USERPTR currently supported is the one needed
		   by the read() method.
		 */
		if (vb->baddr)
			return -EINVAL;

		mem->size = PAGE_ALIGN(vb->size);
		mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
						&mem->dma_handle, GFP_KERNEL);
		if (!mem->vaddr) {
			dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
				mem->size);
			return -ENOMEM;
		}

		dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
			mem->vaddr, mem->size);
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

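	/* The capture device writes into the buffer over DMA, so sync
	   it for CPU access before the data is handed to userspace. */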
	dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
				DMA_FROM_DEVICE);
	return 0;
}

static int __videobuf_mmap_free(struct videobuf_queue *q)
{
	unsigned int i;

	dev_dbg(q->dev, "%s\n", __func__);
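	/* Refuse to free the buffers while any of them is still mmapped */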
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (q->bufs[i] && q->bufs[i]->map)
			return -EBUSY;
	}

	return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	unsigned int first;
	int retval;
	unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;

	dev_dbg(q->dev, "%s\n", __func__);
	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

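	/* Userspace passes the offset it obtained from VIDIOC_QUERYBUF
	   (v4l2_buffer.m.offset) as the mmap() offset; it is matched
	   against each buffer's boff below. */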
	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (!q->bufs[first])
			continue;

		if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
			continue;
		if (q->bufs[first]->boff == offset)
			break;
	}
	if (VIDEO_MAX_FRAME == first) {
		dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
			offset);
		return -EINVAL;
	}

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	q->bufs[first]->map = map;
	map->start = vma->vm_start;
	map->end = vma->vm_end;
	map->q = q;

	q->bufs[first]->baddr = vma->vm_start;

	mem = q->bufs[first]->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
	mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
					&mem->dma_handle, GFP_KERNEL);
	if (!mem->vaddr) {
		dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
			mem->size);
		goto error;
	}
	dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
		mem->vaddr, mem->size);

	/* Try to remap memory */

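	/* Map no more than the size that was actually allocated */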
	size = vma->vm_end - vma->vm_start;
	size = (size < mem->size) ? size : mem->size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	retval = remap_pfn_range(vma, vma->vm_start,
				 mem->dma_handle >> PAGE_SHIFT,
				 size, vma->vm_page_prot);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d\n", retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		mem->vaddr = NULL;
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)q->bufs[first]->bsize,
		vma->vm_pgoff, first);

	videobuf_vm_open(vma);

	return 0;

error:
	/* Roll back the buffer list update so no stale mapping remains */
	q->bufs[first]->map = NULL;
	q->bufs[first]->baddr = 0;
	kfree(map);
	return -ENOMEM;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   char __user *data, size_t count,
				   int nonblocking)
{
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
	void *vaddr;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
	BUG_ON(!mem->vaddr);

	/* copy to userspace */
	if (count > q->read_buf->size - q->read_off)
		count = q->read_buf->size - q->read_off;

	vaddr = mem->vaddr;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc;
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * with all the vbi decoding software out there ... */
		fc = (unsigned int *)mem->vaddr;
		fc += (q->read_buf->size >> 2) - 1;
		*fc = q->read_buf->field_count >> 1;
		dev_dbg(q->dev, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,

	.alloc = __videobuf_alloc,
	.iolock = __videobuf_iolock,
	.sync = __videobuf_sync,
	.mmap_free = __videobuf_mmap_free,
	.mmap_mapper = __videobuf_mmap_mapper,
	.video_copy_to_user = __videobuf_copy_to_user,
	.copy_stream = __videobuf_copy_stream,
	.vmalloc = __videobuf_to_vmalloc,
};

void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
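
/*
 * A minimal sketch of how a driver's buf_queue() callback might use the
 * returned bus address (the device and register names are hypothetical):
 *
 *	dma_addr_t addr = videobuf_to_dma_contig(vb);
 *
 *	writel(addr, my_dev->base + MY_REG_DMA_ADDR);
 */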

void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise the mmapped
	   region would be released while still needed. In that case the
	   memory release happens inside videobuf_vm_close().
	   So free the memory here only if it was allocated for the
	   read() method (V4L2_MEMORY_USERPTR with a NULL baddr).
	 */
	if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
	mem->vaddr = NULL;
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");