/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/memory_alloc.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/genalloc.h>
#include <linux/mutex.h>

#define MAX_MEMPOOLS 8

struct mem_pool mpools[MAX_MEMPOOLS];

/* The tree contains all allocations over all memory pools */
static struct rb_root alloc_root;
static struct mutex alloc_mutex;

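/*
 * seq_file iterator for the debugfs "map" file: walks the allocation
 * rbtree in address order while holding alloc_mutex, which s_start()
 * takes and s_stop() releases around each traversal.
 */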
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&alloc_mutex)
{
	loff_t n = *pos;
	struct rb_node *r;

	mutex_lock(&alloc_mutex);
	r = rb_first(&alloc_root);

	while (n > 0 && r) {
		n--;
		r = rb_next(r);
	}
	if (!n)
		return r;
	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct rb_node *r = p;
	++*pos;
	return rb_next(r);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&alloc_mutex)
{
	mutex_unlock(&alloc_mutex);
}

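/*
 * Emit one allocation per line: physical address, mapped virtual
 * address, length in bytes, owning pool id, and the caller that made
 * the allocation (resolved to a symbol by %pS).
 */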
static int s_show(struct seq_file *m, void *p)
{
	struct rb_node *r = p;
	struct alloc *node = rb_entry(r, struct alloc, rb_node);

	seq_printf(m, "0x%lx 0x%p %ld %u %pS\n", node->paddr, node->vaddr,
		   node->len, node->mpool->id, node->caller);
	return 0;
}

static const struct seq_operations mempool_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int mempool_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &mempool_op);
}

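/*
 * Look up an allocation by the address it is keyed on (the mapped
 * virtual address, or the physical address for nomap allocations).
 * Returns NULL if no matching node exists.
 */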
static struct alloc *find_alloc(void *addr)
{
	struct rb_root *root = &alloc_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&alloc_mutex);

	while (p) {
		struct alloc *node;

		node = rb_entry(p, struct alloc, rb_node);
		if (addr < node->vaddr)
			p = p->rb_left;
		else if (addr > node->vaddr)
			p = p->rb_right;
		else {
			mutex_unlock(&alloc_mutex);
			return node;
		}
	}
	mutex_unlock(&alloc_mutex);
	return NULL;
}

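/*
 * Insert a new allocation into the rbtree, keyed by node->vaddr.
 * Fails with -EINVAL if an entry for the same address already exists,
 * since that would indicate a double allocation.
 */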
static int add_alloc(struct alloc *node)
{
	struct rb_root *root = &alloc_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&alloc_mutex);
	while (*p) {
		struct alloc *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct alloc, rb_node);

		if (node->vaddr < tmp->vaddr)
			p = &(*p)->rb_left;
		else if (node->vaddr > tmp->vaddr)
			p = &(*p)->rb_right;
		else {
			WARN(1, "memory at %p already allocated", tmp->vaddr);
			mutex_unlock(&alloc_mutex);
			return -EINVAL;
		}
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, root);
	mutex_unlock(&alloc_mutex);
	return 0;
}

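/* Unlink an allocation from the rbtree; the caller frees the node. */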
static int remove_alloc(struct alloc *victim_node)
{
	struct rb_root *root = &alloc_root;

	if (!victim_node)
		return -EINVAL;

	mutex_lock(&alloc_mutex);
	rb_erase(&victim_node->rb_node, root);
	mutex_unlock(&alloc_mutex);
	return 0;
}

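/*
 * Wrap a physical address range in a genalloc pool with page-sized
 * allocation granularity (PAGE_SHIFT order). Returns NULL if the pool
 * cannot be created or the range cannot be added to it.
 */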
static struct gen_pool *initialize_gpool(unsigned long start,
					 unsigned long size)
{
	struct gen_pool *gpool;

	gpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!gpool)
		return NULL;

	if (gen_pool_add(gpool, start, size, -1)) {
		gen_pool_destroy(gpool);
		return NULL;
	}

	return gpool;
}

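/*
 * Carve an aligned, page-rounded region out of a pool, ioremap it
 * (cached or uncached), and record the mapping in the allocation
 * rbtree. On any failure the region is returned to the pool and NULL
 * is returned.
 */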
static void *__alloc(struct mem_pool *mpool, unsigned long size,
		     unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;
	unsigned long aligned_size;
	int log_align = ilog2(align);
	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);

	if (!vaddr)
		goto out_kfree;

	node->vaddr = vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
	if (vaddr)
		iounmap(vaddr);
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}

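/*
 * Tear down an allocation found by its lookup address: optionally
 * iounmap the mapping (mapped allocations only), return the physical
 * range to its pool, and drop the rbtree node.
 */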
static void __free(void *vaddr, bool unmap)
{
	struct alloc *node = find_alloc(vaddr);

	if (!node)
		return;

	if (unmap)
		iounmap(node->vaddr);

	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
	node->mpool->free += node->len;

	remove_alloc(node);
	kfree(node);
}

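/*
 * Map a memory type to its pool, creating the backing genalloc pool
 * on first use. Returns NULL for out-of-range types, unconfigured
 * pools, or if the lazy pool creation fails.
 */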
static struct mem_pool *mem_type_to_memory_pool(int mem_type)
{
	struct mem_pool *mpool;

	if (mem_type < 0 || mem_type >= MAX_MEMPOOLS)
		return NULL;

	mpool = &mpools[mem_type];
	if (!mpool->size)
		return NULL;

	mutex_lock(&mpool->pool_mutex);
	if (!mpool->gpool)
		mpool->gpool = initialize_gpool(mpool->paddr, mpool->size);
	mutex_unlock(&mpool->pool_mutex);
	if (!mpool->gpool)
		return NULL;

	return mpool;
}

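/**
 * initialize_memory_pool() - register a physical memory range as a pool
 * @start: physical base address of the range
 * @size: pool size in bytes; must be a multiple of PAGE_SIZE and
 *        larger than one page
 * @mem_type: pool index, below MAX_MEMPOOLS
 *
 * The backing genalloc pool is created lazily on first allocation.
 * Returns the configured pool, or NULL if the arguments are invalid.
 */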
struct mem_pool *initialize_memory_pool(unsigned long start,
	unsigned long size, int mem_type)
{
	int id = mem_type;

	if (id >= MAX_MEMPOOLS || size <= PAGE_SIZE || size % PAGE_SIZE)
		return NULL;

	mutex_lock(&mpools[id].pool_mutex);

	mpools[id].paddr = start;
	mpools[id].size = size;
	mpools[id].free = size;
	mpools[id].id = id;
	mutex_unlock(&mpools[id].pool_mutex);

	pr_info("memory pool %d (start %lx size %lx) initialized\n",
		id, start, size);
	return &mpools[id];
}
EXPORT_SYMBOL_GPL(initialize_memory_pool);

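/**
 * allocate_contiguous_memory() - allocate and map a contiguous region
 * @size: number of bytes, rounded up to a whole number of pages
 * @mem_type: pool to allocate from
 * @align: required alignment in bytes, expected to be a power of two
 * @cached: nonzero to map the region cached, zero for uncached
 *
 * Returns the kernel virtual address of the mapping, or NULL on
 * failure. Free with free_contiguous_memory().
 */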
void *allocate_contiguous_memory(unsigned long size,
	int mem_type, unsigned long align, int cached)
{
	unsigned long aligned_size = PFN_ALIGN(size);
	struct mem_pool *mpool;

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool)
		return NULL;

	return __alloc(mpool, aligned_size, align, cached,
		       __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory);

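/*
 * Allocate physical memory from a pool without mapping it. The
 * explicit @caller is recorded in the debugfs map; wrappers pass
 * their own return address so the true caller shows up there.
 */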
unsigned long _allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align, void *caller)
{
	unsigned long paddr;
	unsigned long aligned_size;
	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool || !mpool->gpool)
		return 0;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return 0;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	/* We search the tree using node->vaddr, so set
	 * it to something unique even though we don't
	 * use it for physical allocation nodes.
	 * The virtual and physical address ranges
	 * are disjoint, so there won't be any chance of
	 * a duplicate node->vaddr value.
	 */
	node->vaddr = (void *)paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return 0;
}
EXPORT_SYMBOL_GPL(_allocate_contiguous_memory_nomap);

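/* Convenience wrapper that records its caller as the allocation owner. */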
unsigned long allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, mem_type, align,
			__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory_nomap);

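/*
 * Free a region returned by allocate_contiguous_memory(), identified
 * by its mapped virtual address; the mapping is also torn down.
 */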
void free_contiguous_memory(void *addr)
{
	if (!addr)
		return;
	__free(addr, true);
}
EXPORT_SYMBOL_GPL(free_contiguous_memory);

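/*
 * Free a region returned by the nomap allocators, identified by its
 * physical address; there is no mapping to remove.
 */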
void free_contiguous_memory_by_paddr(unsigned long paddr)
{
	if (!paddr)
		return;
	__free((void *)paddr, false);
}
EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr);

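/*
 * Translate a mapped virtual address back to the physical address of
 * its allocation. Returns -EINVAL (as an unsigned long) if the
 * address is not a known allocation.
 */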
unsigned long memory_pool_node_paddr(void *vaddr)
{
	struct alloc *node = find_alloc(vaddr);

	if (!node)
		return -EINVAL;

	return node->paddr;
}
EXPORT_SYMBOL_GPL(memory_pool_node_paddr);

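/* Report the page-rounded length of the allocation backing @vaddr. */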
unsigned long memory_pool_node_len(void *vaddr)
{
	struct alloc *node = find_alloc(vaddr);

	if (!node)
		return -EINVAL;

	return node->len;
}
EXPORT_SYMBOL_GPL(memory_pool_node_len);

static const struct file_operations mempool_operations = {
	.owner = THIS_MODULE,
	.open = mempool_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

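/*
 * Reset the allocation tree and the per-pool mutexes; must run before
 * any pool is registered or used.
 */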
int __init memory_pool_init(void)
{
	int i;

	alloc_root = RB_ROOT;
	mutex_init(&alloc_mutex);
	for (i = 0; i < ARRAY_SIZE(mpools); i++) {
		mutex_init(&mpools[i].pool_mutex);
		mpools[i].gpool = NULL;
	}

	return 0;
}

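/*
 * Expose the allocation table at /sys/kernel/debug/mempool/map, one
 * allocation per line in the s_show() format above.
 */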
static int __init debugfs_mempool_init(void)
{
	struct dentry *entry, *dir = debugfs_create_dir("mempool", NULL);

	if (!dir) {
		pr_err("Cannot create /sys/kernel/debug/mempool\n");
		return -EINVAL;
	}

	entry = debugfs_create_file("map", S_IRUSR, dir,
				    NULL, &mempool_operations);
	if (!entry)
		pr_err("Cannot create /sys/kernel/debug/mempool/map\n");

	return entry ? 0 : -EINVAL;
}

module_init(debugfs_mempool_init);