/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;	/* kernel mapping of the pool (from ioremap) */
	u32		device_base;	/* base address as seen by the device */
	int		size;		/* pool size in pages */
	int		flags;		/* DMA_MEMORY_* flags passed at declaration */
	unsigned long	*bitmap;	/* one bit per page, set when allocated */
};

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

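/*
 * Usage sketch (illustrative only): once a region is declared, a driver
 * can pin down a fixed sub-range of it, e.g. a descriptor ring the
 * device expects at a known device address, so that later allocations
 * cannot hand it out.  The address and size are hypothetical example
 * values; errors come back ERR_PTR-encoded.
 *
 *	void *ring = dma_mark_declared_memory_occupied(&pdev->dev,
 *						       0x88001000, 0x1000);
 *	if (IS_ERR(ring))
 *		return PTR_ERR(ring);
 */
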
/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address of the
 * allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}

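/*
 * Call-site sketch (illustrative only): per the kernel-doc above, a
 * per-arch dma_alloc_coherent() tries the per-device pool first and
 * falls back to the generic allocator only when this helper returns 0.
 * arch_alloc_pages() is a hypothetical placeholder for the arch's
 * generic allocation path.
 *
 *	void *dma_alloc_coherent(struct device *dev, size_t size,
 *				 dma_addr_t *dma_handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
 *			return ret;
 *
 *		return arch_alloc_pages(dev, size, dma_handle, gfp);
 *	}
 */
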
/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_free_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
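
/*
 * Call-site sketch (illustrative only): the matching per-arch
 * dma_free_coherent() offers the buffer back to the per-device pool
 * first and touches the generic allocator only when this helper
 * returns 0.  arch_free_pages() is a hypothetical placeholder for the
 * arch's generic release path.
 *
 *	void dma_free_coherent(struct device *dev, size_t size,
 *			       void *vaddr, dma_addr_t dma_handle)
 *	{
 *		if (dma_release_from_coherent(dev, get_order(size), vaddr))
 *			return;
 *
 *		arch_free_pages(dev, size, vaddr, dma_handle);
 *	}
 */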