/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
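
/*
 * Illustrative only (not built): a hedged sketch of how a driver might
 * declare a device-local SRAM window as its coherent pool at probe time.
 * The addresses, the size, and example_probe() itself are made up; real
 * callers take them from their platform resources.
 */
#if 0
static int example_probe(struct device *dev)
{
	/* 64 KiB of hypothetical on-chip SRAM, bus-addressed 1:1. */
	int rc = dma_declare_coherent_memory(dev, 0x10000000, 0x10000000,
					     0x10000, DMA_MEMORY_MAP);
	if (!(rc & DMA_MEMORY_MAP))
		return -ENOMEM;

	/* dma_alloc_coherent() on this device now draws from the SRAM. */
	return 0;
}
#endif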

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
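
/*
 * Illustrative only (not built): reserving a sub-range of a declared pool
 * so dma_alloc_coherent() never hands it out, e.g. a mailbox the hardware
 * expects at a fixed bus address inside the pool. The bus address and
 * example_reserve_mailbox() are hypothetical; device_addr must fall
 * within the range declared above.
 */
#if 0
static void *example_reserve_mailbox(struct device *dev)
{
	void *vaddr;

	/* Pin one page at a fixed bus address inside the pool. */
	vaddr = dma_mark_declared_memory_occupied(dev, 0x10000000,
						  PAGE_SIZE);
	if (IS_ERR(vaddr))
		return NULL;
	return vaddr;
}
#endif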

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address of the
 *		allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);

	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
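
/*
 * Illustrative only (not built): the calling convention the comment above
 * describes, as an arch's dma_alloc_coherent() would follow it. Only
 * dma_alloc_from_coherent() is real here; the other names are
 * placeholders for the arch's generic allocator.
 */
#if 0
static void *example_arch_dma_alloc_coherent(struct device *dev, size_t size,
					     dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/*
	 * Per-device pool first. A non-zero return ends the search even
	 * when ret is NULL (an exclusive pool that is full).
	 */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* Otherwise fall back to the generic allocator. */
	return example_alloc_from_generic_pool(dev, size, dma_handle, gfp);
}
#endif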

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
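
/*
 * Illustrative only (not built): the matching free path. Per the comment
 * above, the generic allocator is touched only when
 * dma_release_from_coherent() returns 0. The example_* names are
 * placeholders.
 */
#if 0
static void example_arch_dma_free_coherent(struct device *dev, size_t size,
					   void *vaddr, dma_addr_t dma_handle)
{
	/* The pool tracks allocations by page order, not byte size. */
	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	example_free_to_generic_pool(dev, size, vaddr, dma_handle);
}
#endif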

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
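
/*
 * Illustrative only (not built): how an arch mmap implementation chains
 * to the helper above. A return of 1 means the per-device pool handled
 * (or rejected) the mapping and *ret already holds the final status; only
 * on 0 does the caller map from generic memory. example_mmap_generic()
 * is a placeholder.
 */
#if 0
static int example_arch_dma_mmap(struct device *dev,
				 struct vm_area_struct *vma, void *cpu_addr,
				 dma_addr_t dma_addr, size_t size)
{
	int ret;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return example_mmap_generic(dev, vma, cpu_addr, dma_addr, size);
}
#endif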