/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/memory_alloc.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define MAX_MEMPOOLS 8

struct mem_pool mpools[MAX_MEMPOOLS];

/* The tree contains all allocations over all memory pools */
static struct rb_root alloc_root;
static struct mutex alloc_mutex;

static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&alloc_mutex)
{
	loff_t n = *pos;
	struct rb_node *r;

	mutex_lock(&alloc_mutex);
	r = rb_first(&alloc_root);

	while (n > 0 && r) {
		n--;
		r = rb_next(r);
	}
	if (!n)
		return r;
	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct rb_node *r = p;
	++*pos;
	return rb_next(r);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&alloc_mutex)
{
	mutex_unlock(&alloc_mutex);
}

/* Emit one allocation per line: paddr vaddr len pool-id caller */
static int s_show(struct seq_file *m, void *p)
{
	struct rb_node *r = p;
	struct alloc *node = rb_entry(r, struct alloc, rb_node);

	seq_printf(m, "0x%pa 0x%pa %ld %u %pS\n", &node->paddr, &node->vaddr,
		   node->len, node->mpool->id, node->caller);
	return 0;
}

static const struct seq_operations mempool_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int mempool_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &mempool_op);
}

/*
 * Look up the tracking node keyed by addr.  The key is the mapped virtual
 * address for mapped allocations and the physical address for nomap
 * allocations (see _allocate_contiguous_memory_nomap()).
 */
static struct alloc *find_alloc(phys_addr_t addr)
{
	struct rb_root *root = &alloc_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&alloc_mutex);

	while (p) {
		struct alloc *node;

		node = rb_entry(p, struct alloc, rb_node);
		if (addr < node->vaddr)
			p = p->rb_left;
		else if (addr > node->vaddr)
			p = p->rb_right;
		else {
			mutex_unlock(&alloc_mutex);
			return node;
		}
	}
	mutex_unlock(&alloc_mutex);
	return NULL;
}

static int add_alloc(struct alloc *node)
{
	struct rb_root *root = &alloc_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&alloc_mutex);
	while (*p) {
		struct alloc *tmp;
		parent = *p;

		tmp = rb_entry(parent, struct alloc, rb_node);

		if (node->vaddr < tmp->vaddr)
			p = &(*p)->rb_left;
		else if (node->vaddr > tmp->vaddr)
			p = &(*p)->rb_right;
		else {
			WARN(1, "memory at %pa already allocated", &tmp->vaddr);
			mutex_unlock(&alloc_mutex);
			return -EINVAL;
		}
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, root);
	mutex_unlock(&alloc_mutex);
	return 0;
}

static int remove_alloc(struct alloc *victim_node)
{
	struct rb_root *root = &alloc_root;
	if (!victim_node)
		return -EINVAL;

	mutex_lock(&alloc_mutex);
	rb_erase(&victim_node->rb_node, root);
	mutex_unlock(&alloc_mutex);
	return 0;
}

static struct gen_pool *initialize_gpool(phys_addr_t start,
		unsigned long size)
{
	struct gen_pool *gpool;

	gpool = gen_pool_create(PAGE_SHIFT, -1);

	if (!gpool)
		return NULL;
	if (gen_pool_add(gpool, start, size, -1)) {
		gen_pool_destroy(gpool);
		return NULL;
	}

	return gpool;
}

/*
 * Carve an aligned block out of mpool, ioremap it (cached or uncached) and
 * record the allocation in the global rb-tree.
 */
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);

	if (!vaddr)
		goto out_kfree;

	/*
	 * Just cast to an unsigned long to avoid warnings about casting from a
	 * pointer to an integer of different size. The pointer is only 32-bits
	 * so we lose no data.
	 */
	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
	if (vaddr)
		iounmap(vaddr);
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}

/*
 * Undo an allocation made by __alloc() or
 * _allocate_contiguous_memory_nomap().  unmap is false for nomap
 * allocations, which were never ioremapped.
 */
static void __free(void *vaddr, bool unmap)
{
	struct alloc *node = find_alloc((unsigned long)vaddr);

	if (!node)
		return;

	if (unmap)
		/*
		 * We need the double cast because otherwise gcc complains about
		 * cast to pointer of different size. This is technically a down
		 * cast but if unmap is being called, this had better be an
		 * actual 32-bit pointer anyway.
		 */
		iounmap((void *)(unsigned long)node->vaddr);

	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
	node->mpool->free += node->len;

	remove_alloc(node);
	kfree(node);
}

static struct mem_pool *mem_type_to_memory_pool(int mem_type)
{
	struct mem_pool *mpool = &mpools[mem_type];

	if (!mpool->size)
		return NULL;

	mutex_lock(&mpool->pool_mutex);
	if (!mpool->gpool)
		mpool->gpool = initialize_gpool(mpool->paddr, mpool->size);
	mutex_unlock(&mpool->pool_mutex);
	if (!mpool->gpool)
		return NULL;

	return mpool;
}

struct mem_pool *initialize_memory_pool(phys_addr_t start,
	unsigned long size, int mem_type)
{
	int id = mem_type;

	if (id >= MAX_MEMPOOLS || size <= PAGE_SIZE || size % PAGE_SIZE)
		return NULL;

	mutex_lock(&mpools[id].pool_mutex);

	mpools[id].paddr = start;
	mpools[id].size = size;
	mpools[id].free = size;
	mpools[id].id = id;
	mutex_unlock(&mpools[id].pool_mutex);

	pr_info("memory pool %d (start %pa size %lx) initialized\n",
		id, &start, size);
	return &mpools[id];
}
EXPORT_SYMBOL_GPL(initialize_memory_pool);
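
/*
 * A minimal usage sketch for pool registration (hypothetical caller, e.g.
 * board setup code; the base address, size and pool index below are
 * illustrative only, not taken from this file):
 *
 *	struct mem_pool *pool;
 *
 *	pool = initialize_memory_pool(reserved_phys_base, SZ_8M, 1);
 *	if (!pool)
 *		pr_err("mempool 1 setup failed\n");
 */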

void *allocate_contiguous_memory(unsigned long size,
	int mem_type, unsigned long align, int cached)
{
	unsigned long aligned_size = PFN_ALIGN(size);
	struct mem_pool *mpool;

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool)
		return NULL;
	return __alloc(mpool, aligned_size, align, cached,
		__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory);
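
/*
 * A minimal sketch of a mapped allocation (hypothetical caller; the pool
 * index, size and alignment are illustrative only):
 *
 *	void *buf;
 *
 *	buf = allocate_contiguous_memory(SZ_64K, 1, SZ_4K, 1);
 *	if (buf) {
 *		memset(buf, 0, SZ_64K);
 *		...
 *		free_contiguous_memory(buf);
 *	}
 */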

phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align, void *caller)
{
	phys_addr_t paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool || !mpool->gpool)
		return 0;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return 0;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	/* We search the tree using node->vaddr, so set
	 * it to something unique even though we don't
	 * use it for physical allocation nodes.
	 * The virtual and physical address ranges
	 * are disjoint, so there won't be any chance of
	 * a duplicate node->vaddr value.
	 */
	node->vaddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return 0;
}
EXPORT_SYMBOL_GPL(_allocate_contiguous_memory_nomap);

phys_addr_t allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, mem_type, align,
		__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory_nomap);

void free_contiguous_memory(void *addr)
{
	if (!addr)
		return;
	__free(addr, true);
	return;
}
EXPORT_SYMBOL_GPL(free_contiguous_memory);

void free_contiguous_memory_by_paddr(phys_addr_t paddr)
{
	if (!paddr)
		return;
	__free((void *)(unsigned long)paddr, false);
	return;
}
EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr);
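
/*
 * A minimal sketch of an unmapped allocation (hypothetical caller; the
 * returned physical address is typically handed to a hardware block rather
 * than dereferenced by the CPU, values are illustrative only):
 *
 *	phys_addr_t paddr;
 *
 *	paddr = allocate_contiguous_memory_nomap(SZ_1M, 1, SZ_1M);
 *	if (paddr) {
 *		...
 *		free_contiguous_memory_by_paddr(paddr);
 *	}
 */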

phys_addr_t memory_pool_node_paddr(void *vaddr)
{
	struct alloc *node = find_alloc((unsigned long)vaddr);

	if (!node)
		return -EINVAL;

	return node->paddr;
}
EXPORT_SYMBOL_GPL(memory_pool_node_paddr);

unsigned long memory_pool_node_len(void *vaddr)
{
	struct alloc *node = find_alloc((unsigned long)vaddr);

	if (!node)
		return -EINVAL;

	return node->len;
}
EXPORT_SYMBOL_GPL(memory_pool_node_len);

static const struct file_operations mempool_operations = {
	.owner = THIS_MODULE,
	.open = mempool_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

int __init memory_pool_init(void)
{
	int i;

	alloc_root = RB_ROOT;
	mutex_init(&alloc_mutex);
	for (i = 0; i < ARRAY_SIZE(mpools); i++) {
		mutex_init(&mpools[i].pool_mutex);
		mpools[i].gpool = NULL;
	}

	return 0;
}

static int __init debugfs_mempool_init(void)
{
	struct dentry *entry, *dir = debugfs_create_dir("mempool", NULL);

	if (!dir) {
		pr_err("Cannot create /sys/kernel/debug/mempool\n");
		return -EINVAL;
	}

	entry = debugfs_create_file("map", S_IRUSR, dir,
				NULL, &mempool_operations);

	if (!entry)
		pr_err("Cannot create /sys/kernel/debug/mempool/map\n");

	return entry ? 0 : -EINVAL;
}

module_init(debugfs_mempool_init);