/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

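/*
 * Bookkeeping for one coherent memory area: the kernel-side mapping, the
 * device-visible base address, and a one-bit-per-page allocation bitmap
 * protected by @spinlock. @size is the number of pages in the area.
 */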
struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

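/*
 * Translate the pool's base into the bus address space seen by @dev: when
 * the pool was created from a device-tree reserved region, apply the
 * device's dma_pfn_offset; otherwise the declared device_base is used as-is.
 */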
static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

static int dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, int flags,
		struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
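
/*
 * Example (illustrative sketch, not part of this file): a platform driver
 * with device-local memory at a fixed physical address could reserve it as
 * an exclusive per-device pool from its probe routine. The addresses and
 * size are made-up placeholders:
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, 0x10000000, 0x10000000,
 *					  SZ_1M, DMA_MEMORY_EXCLUSIVE);
 *	if (ret)
 *		return ret;
 *
 * dma_alloc_coherent() calls against this device are then satisfied from
 * the declared pool instead of the generic allocators.
 */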

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
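
/*
 * Example (illustrative sketch): after declaring a pool, a driver can pin a
 * sub-range that is already in use, such as a firmware-owned buffer at a
 * fixed bus address (the values here are made up):
 *
 *	void *va = dma_mark_declared_memory_occupied(&pdev->dev,
 *						     0x10000000, SZ_64K);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */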

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
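
/*
 * Example (illustrative sketch): an architecture's dma_alloc_coherent()
 * path is expected to try the per-device pool before the generic
 * allocators, roughly:
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *		return vaddr;
 *	// fall through to the generic allocator
 *
 * A nonzero return with vaddr == NULL means the pool is exclusive and the
 * allocation must fail rather than fall back.
 */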

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
					 dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
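
/*
 * Example (illustrative sketch): the matching hook in an arch's
 * dma_free_coherent() path, mirroring the allocation example above:
 *
 *	int order = get_order(size);
 *
 *	if (dma_release_from_dev_coherent(dev, order, vaddr))
 *		return;
 *	// not from the per-device pool; release through the generic path
 */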

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
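
/*
 * Example (illustrative sketch): a driver mmap handler built on this
 * helper; "vaddr" and "size" are assumed to describe a buffer previously
 * obtained from dma_alloc_coherent():
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, vaddr, size, &ret))
 *		return ret;
 *	// buffer is not from the per-device pool; use dma_mmap_coherent()
 */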

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size,
					       DMA_MEMORY_EXCLUSIVE, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
				&rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
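
/*
 * Example (illustrative sketch): a device-tree fragment using this binding.
 * Node names and addresses are made up; "shared-dma-pool", "no-map" and
 * "linux,dma-default" are the properties handled above:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_dma: linux,dma {
 *			compatible = "shared-dma-pool";
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 *
 * A device node then references the pool with
 * "memory-region = <&coherent_dma>;", which makes the reserved-memory core
 * call rmem_dma_device_init() for that device at probe time.
 */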

static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error
	 * from dma_assign_coherent_memory() when the device is NULL.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif