/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

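/*
 * Allocate @size bytes below @limit (clamped to memblock.current_limit),
 * preferring addresses at or above @goal and memory on node @nid (or any
 * node if @nid is MAX_NUMNODES).  The block is zeroed, reserved in
 * memblock, and registered with kmemleak so it is never reported as a
 * leak.  Returns the virtual address of the block, or NULL if no fitting
 * range was found.
 */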
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
	if (!addr)
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	memblock_reserve(addr, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

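/*
 * Release the pages in [start, end) to the page allocator.  Runs of
 * BITS_PER_LONG naturally aligned pages are freed as single
 * order-ilog2(BITS_PER_LONG) blocks; the unaligned head and tail of the
 * range are freed page by page.
 */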
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i, start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}

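/*
 * Free one physical range to the page allocator: round it inward to
 * whole pages, clamp it to low memory, and return the number of pages
 * actually released.
 */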
static unsigned long __init __free_memory_core(phys_addr_t start,
				     phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn > end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

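/*
 * Hand all free low memory described by memblock over to the buddy
 * allocator.  @nodeid is currently unused: every free range in the
 * system is walked, whichever node it belongs to.
 */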
unsigned long __init free_low_memory_core_early(int nodeid)
{
	unsigned long count = 0;
	phys_addr_t start, end, size;
	u64 i;

	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		count += __free_memory_core(start, end);

	/* Free the range holding the reserved-regions array, if we
	 * allocated it dynamically. */
	size = get_allocated_memblock_reserved_regions_info(&start);
	if (size)
		count += __free_memory_core(start, start + size);

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);

	/* free_low_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because in some cases node 0 has no RAM installed and the low
	 * memory then lives on node 1.  Using MAX_NUMNODES makes sure all
	 * ranges in early_node_map[] are used, not only those related to
	 * node 0.
	 */
	return free_low_memory_core_early(MAX_NUMNODES);
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_free(addr, size);
}

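/*
 * Common allocation core for the non-panicking variants: try memblock
 * with the caller's @goal first, retry once with the goal dropped, and
 * return NULL rather than panicking when nothing fits.  Falls back to
 * the slab allocator (with a one-time warning) if it is already up.
 */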
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

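/*
 * Panicking counterpart of ___alloc_bootmem_nopanic(): boot-time callers
 * generally cannot recover from a failed allocation, so log and panic
 * instead of returning NULL.
 */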
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

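	/*
	 * Try node-local memory first, then any node; if both fail and a
	 * goal was given, drop the goal and start over.
	 */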
again:
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
					goal, -1ULL);
	if (!ptr && goal) {
		goal = 0;
		goto again;
	}
	return ptr;
}

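/*
 * With memblock discarding bootmem there is no separate highmem bootmem
 * pool, so the "high" variant is a plain alias for __alloc_bootmem_node().
 */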
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
}
#endif

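/*
 * Like __alloc_bootmem_node(), but returns NULL instead of panicking
 * when the request cannot be satisfied on any node.
 */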
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
					   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

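/*
 * Upper bound for "low" allocations: the first 4GB of physical memory,
 * unless the architecture overrides it.
 */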
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					 goal, ARCH_LOW_ADDRESS_LIMIT);
}