/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

/*
 * Return the bus address of the pool for @dev, applying the device's
 * dma_pfn_offset when the pool was created from a device-tree reserved
 * region (see rmem_dma_device_init() below).
 */
static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base) {
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	if (mem->flags & DMA_MEMORY_MAP)
		memunmap(mem->virt_base);
	else
		iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
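
/*
 * Example (illustrative only, not part of this file): a driver with a
 * dedicated SRAM window might declare it from probe(). The device,
 * addresses and size below are hypothetical; on success the function
 * returns a non-zero flag value, 0 on failure.
 *
 *	if (!dma_declare_coherent_memory(&pdev->dev, 0x10000000,
 *					 0x10000000, SZ_1M,
 *					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
 *		return -ENXIO;
 *
 * Subsequent dma_alloc_coherent() calls on &pdev->dev are then satisfied
 * from this pool (exclusively, because of DMA_MEMORY_EXCLUSIVE).
 */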

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
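
/*
 * Example (illustrative): a driver can pin a fixed sub-range of a
 * previously declared pool, e.g. a firmware mailbox that must live at a
 * known bus address. The address and size are hypothetical.
 *
 *	void *mbox = dma_mark_declared_memory_occupied(&pdev->dev,
 *						       0x10000000 + SZ_4K,
 *						       SZ_4K);
 *	if (IS_ERR(mbox))
 *		return PTR_ERR(mbox);
 *
 * The returned pointer is the kernel virtual address of the reserved
 * range; the covered pages are taken out of the allocation bitmap.
 */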

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address of
 *		the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or non-zero if dma_alloc_coherent should return
 * @ret. Note that for an exclusive pool (DMA_MEMORY_EXCLUSIVE) a failed
 * allocation still returns non-zero with *@ret set to NULL, so the caller
 * fails instead of falling back to generic memory.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	int dma_memory_map;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	if (dma_memory_map)
		memset(*ret, 0, size);
	else
		memset_io(*ret, 0, size);

	return 1;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
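
/*
 * Example (illustrative): the usual call pattern from an architecture's
 * dma_alloc_coherent()/ops->alloc implementation. The fallback helper
 * name is hypothetical.
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_coherent(dev, size, handle, &ret))
 *			return ret;	(per-device pool hit, or NULL when
 *					 DMA_MEMORY_EXCLUSIVE and pool full)
 *
 *		return arch_alloc_from_generic_pool(dev, size, handle, gfp);
 *	}
 */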

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller
 * should proceed with releasing memory from generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
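
/*
 * Example (illustrative): the matching free path in an architecture's
 * dma_free_coherent()/ops->free implementation; the generic-pool helper
 * name is hypothetical.
 *
 *	void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 *			   dma_addr_t handle)
 *	{
 *		if (dma_release_from_coherent(dev, get_order(size), vaddr))
 *			return;
 *
 *		arch_free_to_generic_pool(dev, size, vaddr, handle);
 *	}
 */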

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
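
/*
 * Example (illustrative): a typical dma_mmap_coherent()/ops->mmap
 * implementation tries the per-device pool first; the generic fallback
 * helper is hypothetical.
 *
 *	int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			  void *cpu_addr, dma_addr_t handle, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;
 *
 *		return arch_mmap_from_generic_pool(dev, vma, cpu_addr,
 *						   handle, size);
 *	}
 */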

/*
 * Support for reserved memory regions defined in device tree
 */
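/*
 * Example (illustrative) device-tree usage: a "shared-dma-pool" region
 * without "reusable" (and with "no-map" on ARM) is handled by the code
 * below and attached to a device via memory-region. Node names, addresses
 * and sizes are hypothetical.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@48000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x48000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 *	some_device: device@4a000000 {
 *		...
 *		memory-region = <&dma_pool>;
 *	};
 */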
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif