/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

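/*
 * Per-device coherent pool state, hung off dev->dma_mem: the CPU mapping of
 * the declared region, its device-side base address, its size in pages, the
 * DMA_MEMORY_* flags it was declared with, and a bitmap tracking which pages
 * have been handed out.
 */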
struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

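/**
 * dma_declare_coherent_memory() - declare a memory region for coherent DMA use
 * @dev:	device the region is declared for
 * @bus_addr:	bus address of the region as seen by the CPU (it is ioremap()ed)
 * @device_addr:	base address of the region as seen by @dev
 * @size:	size of the region in bytes
 * @flags:	DMA_MEMORY_* flags controlling how the region is used
 *
 * Maps the region and attaches it to @dev as a per-device coherent pool, so
 * that later dma_alloc_from_coherent() calls for this device are satisfied
 * from it.  Returns DMA_MEMORY_MAP or DMA_MEMORY_IO on success and 0 on
 * failure.
 */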
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
	/* don't leave a stale pointer behind on the error path */
	dev->dma_mem = NULL;
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
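
/*
 * Illustrative sketch: how a platform driver might carve out a chunk of
 * on-chip SRAM as a per-device coherent pool from its probe routine.  The
 * device, addresses and size below are hypothetical, not taken from any
 * real driver.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int rc;
 *
 *		rc = dma_declare_coherent_memory(&pdev->dev,
 *						 0x30000000,	// bus address
 *						 0x30000000,	// device address
 *						 1 << 20,	// 1 MiB
 *						 DMA_MEMORY_MAP |
 *						 DMA_MEMORY_EXCLUSIVE);
 *		if (!(rc & DMA_MEMORY_MAP))
 *			return -ENOMEM;
 *
 *		// dma_alloc_coherent(&pdev->dev, ...) is now satisfied from
 *		// the declared pool via dma_alloc_from_coherent().
 *		return 0;
 *	}
 */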

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

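/**
 * dma_mark_declared_memory_occupied() - reserve part of a declared region
 * @dev:	device owning the declared coherent region
 * @device_addr:	device address of the range to reserve
 * @size:	size of the range in bytes
 *
 * Marks the pages covering the requested range as allocated in the device's
 * coherent pool, so dma_alloc_from_coherent() will not hand them out, and
 * returns their virtual address (or an ERR_PTR() value on failure).
 */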
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address
 *	of the allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);

	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
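
/*
 * Illustrative sketch: an architecture's dma_alloc_coherent() would try the
 * per-device pool first and only fall back to the generic allocator when
 * dma_alloc_from_coherent() returns 0.  The function body below is a
 * simplified, hypothetical outline, not a copy of any arch implementation.
 *
 *	void *dma_alloc_coherent(struct device *dev, size_t size,
 *				 dma_addr_t *handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_coherent(dev, size, handle, &ret))
 *			return ret;	// pool hit, or NULL if the pool is
 *					// exclusive and the allocation failed
 *
 *		// ... otherwise allocate from the generic memory areas ...
 *	}
 */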

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
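
/*
 * Illustrative sketch: the matching per-arch free path checks the per-device
 * pool before releasing pages to the generic allocator.  As above, this is a
 * simplified, hypothetical outline.
 *
 *	void dma_free_coherent(struct device *dev, size_t size,
 *			       void *vaddr, dma_addr_t handle)
 *	{
 *		int order = get_order(size);
 *
 *		if (dma_release_from_coherent(dev, order, vaddr))
 *			return;		// came from the per-device pool
 *
 *		// ... otherwise release to the generic allocator ...
 *	}
 */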