/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

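/*
 * With CONFIG_SPARSEMEM_EXTREME the table above is two-level: a
 * section number splits into a root index plus an offset within the
 * root, roughly
 *
 *	ms = &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 *
 * (see __nr_to_section() in linux/mmzone.h), and only roots that
 * cover present sections get a second-level array from
 * sparse_index_alloc() below.  The flat layout pays the whole table
 * cost statically but needs no indirection.
 */
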
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

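/*
 * Worked size example, with assumed (architecture-dependent)
 * constants: NR_MEM_SECTIONS is 1UL << (MAX_PHYSMEM_BITS -
 * SECTION_SIZE_BITS), so e.g. MAX_PHYSMEM_BITS == 44 with
 * SECTION_SIZE_BITS == 27 gives 2^17 sections and a 128KB u8 table.
 * Keeping entries to one or two bytes is what makes this static
 * table affordable.
 */
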
int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two concurrent callers from allocating a
	 * second root array for the same index.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

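/*
 * __section_nr() is the inverse of __nr_to_section(): it recovers a
 * section number from a mem_section pointer by a linear scan over
 * the roots, so __section_nr(__nr_to_section(n)) == n for any
 * present section n.  The O(NR_SECTION_ROOTS) walk is acceptable
 * because, in this file, it is only used on the section-removal
 * path.
 */
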
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

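/*
 * Example round trip (hypothetical nid): sparse_encode_early_nid(3)
 * stores 3 << SECTION_NID_SHIFT in section_mem_map.  The flag bits
 * (e.g. SECTION_MARKED_PRESENT) live below SECTION_NID_SHIFT, so the
 * early nid and the flags can coexist, and sparse_early_nid() simply
 * shifts the flags back out.  sparse_init_one_section() later
 * replaces the nid with the encoded mem_map.
 */
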
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
	unsigned long pfn;

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (start >= max_arch_pfn)
		return;
	if (end >= max_arch_pfn)
		end = max_arch_pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

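/*
 * Sketch of the expected caller (architecture boot code, before
 * sparse_init() runs; the loop shown is illustrative):
 *
 *	for_each_online_node(nid)
 *		memory_present(nid, node_start_pfn, node_end_pfn);
 *
 * where the pfn ranges come from the architecture's own memory map.
 * Note that whole sections, not individual pfns, are marked present.
 */
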
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						  unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

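/*
 * Rough cost example (all numbers assumed): with 4KB pages and 128MB
 * sections, PAGES_PER_SECTION == 32768; at, say, 56 bytes per
 * struct page, each present section contributes about 1.75MB of
 * mem_map, which is what this loop adds up per node.
 */
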
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

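/*
 * The two helpers above are inverses:
 *
 *	sparse_decode_mem_map(sparse_encode_mem_map(map, pnum), pnum) == map
 *
 * Biasing the stored pointer down by the section's first pfn makes
 * it behave like a mem_map indexed by absolute pfn, which is the
 * "identity" the encode-side comment refers to.
 */
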
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

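/*
 * Sizing example (assumed constants): a 32768-page section with
 * 512-page pageblocks and 4 flag bits per pageblock needs
 * 64 * 4 = 256 bits of SECTION_BLOCKFLAGS_BITS, i.e. 32 bytes,
 * already a multiple of sizeof(unsigned long) on 64-bit.
 */
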
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * The mem_map is allocated from large pages where possible (2MB
	 * on 64-bit x86) while each usemap is only a few tens of bytes.
	 * If the two allocations were interleaved, every small usemap
	 * would split an otherwise contiguous run of large pages and
	 * leave the memory map full of holes on big systems.  So make
	 * all the usemap allocations up front, keeping the mem_map
	 * allocations contiguous.
	 *
	 * powerpc needs to call sparse_init_one_section() right after each
	 * sparse_early_mem_map_alloc(), which is another reason usemap_map
	 * must be allocated first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}
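
/*
 * Ordering note (a summary, not new behaviour): sparse_init() runs
 * once during early boot, after the architecture has called
 * memory_present() for everything it has, and before the page
 * allocator is up, which is why all the allocations above go through
 * alloc_bootmem*().
 */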

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

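/*
 * Why the two-step fallback above: the section memmap is a
 * high-order allocation that can fail on a fragmented machine (hence
 * __GFP_NOWARN), so vmalloc() is tried next to stitch the same size
 * out of order-0 pages.  __kfree_section_memmap() below tells the
 * two cases apart with is_vmalloc_addr().
 */
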
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	if (!usemap)
		return;

	/*
	 * Check to see if allocation came from hot-plug-add
	 */
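	/*
	 * (kmalloc()ed memory lives in slab-backed pages, so PageSlab()
	 * on the page containing the usemap distinguishes a hot-added
	 * allocation from a boot-time bootmem one.)
	 */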
	if (PageSlab(virt_to_page(usemap))) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * TODO: Allocations came from bootmem - how do we free them up?
	 */
	printk(KERN_WARNING "Not freeing up allocations from bootmem "
			"- leaking memory\n");
}

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking here: sparse_index_init() does its own locking,
	 * and it may kmalloc(), so it cannot run under the resize lock.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
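
/*
 * Illustrative hot-add flow (the caller shown is hypothetical, but
 * this is the intended shape): memory hotplug resolves the new range
 * to a zone and adds one section at a time,
 *
 *	ret = sparse_add_one_section(zone, start_pfn, nr_pages);
 *
 * on success the section is marked present and its mem_map and
 * usemap are installed under the pgdat resize lock.
 */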

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif