/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_conf {
	struct device *dev;
};

struct vb2_dma_sg_buf {
	struct device *dev;
	void *vaddr;
	struct page **pages;
	int offset;
	enum dma_data_direction dma_dir;
	struct sg_table sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table *dma_sgt;
	size_t size;
	unsigned int num_pages;
	atomic_t refcount;
	struct vb2_vmarea_handler handler;
	struct vm_area_struct *vma;

	struct dma_buf_attachment *db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);
/*
 * Allocate the buffer as a set of page runs: start from the highest page
 * order that still fits the remaining size and fall back to smaller orders
 * on failure, so the resulting scatterlist stays as short as possible.
 */
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
			     buf->dma_dir, &attrs) == 0)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
				   buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
			     buf->dma_dir, &attrs) == 0)
		goto userptr_fail_map;
	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dma_sg_dmabuf_ops, buf->size, flags, NULL);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.prepare = vb2_dma_sg_prepare,
	.finish = vb2_dma_sg_finish,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.get_dmabuf = vb2_dma_sg_get_dmabuf,
	.map_dmabuf = vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

void *vb2_dma_sg_init_ctx(struct device *dev)
{
	struct vb2_dma_sg_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
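
/*
 * Usage sketch (not part of this allocator, shown only as a hedged example):
 * a driver would typically create the allocator context once at probe time
 * and plug these memops into its vb2_queue. The "drv" state and the exact
 * queue_setup() wiring below are illustrative assumptions based on the vb2
 * core of this era; real drivers may differ.
 *
 *	drv->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
 *	if (IS_ERR(drv->alloc_ctx))
 *		return PTR_ERR(drv->alloc_ctx);
 *
 *	drv->queue.mem_ops = &vb2_dma_sg_memops;
 *
 *	// in the driver's queue_setup() callback:
 *	alloc_ctxs[0] = drv->alloc_ctx;
 *
 *	// at remove time:
 *	vb2_dma_sg_cleanup_ctx(drv->alloc_ctx);
 */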

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");