/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;	/* kernel virtual address of the pool */
	dma_addr_t	device_base;	/* base address as seen by the device */
	unsigned long	pfn_base;	/* host PFN of the pool's first page */
	int		size;		/* pool size, in pages */
	int		flags;		/* DMA_MEMORY_* declaration flags */
	unsigned long	*bitmap;	/* one bit per page; set = allocated */
	spinlock_t	spinlock;	/* protects the bitmap */
};

static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base)
		iounmap(mem_base);
	return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
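
/*
 * Example (an illustrative sketch, not part of the original file): a
 * driver with device-local memory, e.g. an SRAM window, could declare it
 * as the device's coherent pool from probe. The FOO_* names and values
 * are hypothetical.
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, FOO_SRAM_PHYS,
 *					  FOO_SRAM_BUS, FOO_SRAM_SIZE,
 *					  DMA_MEMORY_MAP |
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (!(ret & DMA_MEMORY_MAP))
 *		return -ENOMEM;
 *
 * Subsequent dma_alloc_coherent() calls against this device are then
 * satisfied from the pool (see dma_alloc_from_coherent() below);
 * DMA_MEMORY_EXCLUSIVE additionally forbids falling back to generic
 * memory. The pool is torn down with dma_release_declared_memory().
 */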

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
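
/*
 * Example (hypothetical sketch): a driver that must keep a firmware
 * mailbox at a fixed device address within the declared region can pin
 * those pages so the bitmap allocator never hands them out. The FOO_*
 * values are assumptions for illustration.
 *
 *	void *mbox = dma_mark_declared_memory_occupied(&pdev->dev,
 *					FOO_SRAM_BUS + FOO_MBOX_OFFSET,
 *					FOO_MBOX_SIZE);
 *	if (IS_ERR(mbox))
 *		return PTR_ERR(mbox);
 *
 * On success the returned pointer is the kernel virtual address of the
 * reserved range inside the remapped pool.
 */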

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address
 *	 of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	unsigned long flags;
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);
	spin_unlock_irqrestore(&mem->spinlock, flags);

	return 1;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
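
/*
 * Example (a simplified sketch of the call contract above, not any real
 * architecture's code; arch_dma_alloc is an assumed name): an arch
 * dma_alloc_coherent() implementation tries the per-device pool first and
 * falls back to the generic allocator only when this helper returns 0.
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *dma_handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
 *			return ret;
 *
 *		... otherwise allocate from generic memory ...
 *	}
 *
 * Note that when the pool is exhausted and DMA_MEMORY_EXCLUSIVE was set,
 * the helper still returns non-zero with *ret == NULL, which the caller
 * passes straight back to signal allocation failure.
 */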

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
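
/*
 * Example (sketch of the matching free path; arch_dma_free is an assumed
 * name): the per-arch free routine gives this helper first refusal before
 * releasing pages generically.
 *
 *	void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 *			   dma_addr_t dma_handle)
 *	{
 *		if (dma_release_from_coherent(dev, get_order(size), vaddr))
 *			return;
 *
 *		... free generic memory ...
 *	}
 */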

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_coherent
 * @size: size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
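
/*
 * Example (sketch, names assumed): a DMA-ops mmap hook checks the
 * per-device pool before mapping generic pages.
 *
 *	int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;
 *
 *		... map generic pages, e.g. via remap_pfn_range() ...
 *	}
 */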

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
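
/*
 * Example (a hedged device-tree sketch; node names, addresses and sizes
 * are illustrative, not taken from this file): a reserved-memory node
 * matched by the "shared-dma-pool" compatible above, with "no-map" as
 * rmem_dma_setup() requires on ARM:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		multimedia_pool: multimedia@77000000 {
 *			compatible = "shared-dma-pool";
 *			no-map;
 *			reg = <0x77000000 0x4000000>;
 *		};
 *	};
 *
 * A device node then claims the pool with
 *
 *	memory-region = <&multimedia_pool>;
 *
 * and a driver calling of_reserved_mem_device_init(dev) ends up in
 * rmem_dma_device_init() above, which attaches the pool to dev->dma_mem.
 */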