/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

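/*
 * Illustration only (the real helper is __nr_to_section() in
 * <linux/mmzone.h>): with SPARSEMEM_EXTREME the roots are allocated
 * lazily, so looking up a section number is a two-step walk, roughly:
 *
 *	root = mem_section[SECTION_NR_TO_ROOT(nr)];
 *	ms   = root ? &root[nr & SECTION_ROOT_MASK] : NULL;
 *
 * In the flat case SECTIONS_PER_ROOT is 1 and this degenerates into a
 * plain array index.
 */
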
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two concurrent callers from
	 * allocating the same root index twice.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

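/*
 * Illustration only: __section_nr() is the inverse of __nr_to_section(),
 * so for any section number nr whose root has been allocated one would
 * expect
 *
 *	__section_nr(__nr_to_section(nr)) == nr
 *
 * The walk over every root is needed because a mem_section pointer by
 * itself does not record which root it lives in.
 */
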
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

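/*
 * Illustration only, assuming the common SECTION_NID_SHIFT of 2: a
 * section first seen on node 3 stores (3 << 2) == 0xc in
 * section_mem_map.  sparse_init_one_section() later clears the high
 * bits and stores the encoded mem_map instead, so the two uses never
 * overlap in time.
 */
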
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	}

	if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

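/*
 * Illustrative use from architecture setup code, once per node or per
 * memory range, before sparse_init() runs:
 *
 *	memory_present(nid, start_pfn, end_pfn);
 */
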
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						  unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map pointer such that,
 * for any page in the section, page - section_mem_map yields its
 * actual physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

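/*
 * A worked example with illustrative numbers: if PAGES_PER_SECTION is
 * 0x8000, section 2 starts at pfn 0x10000.  With its mem_map at
 * address M, the encoded value is M - 0x10000 (in struct page units);
 * decoding adds 0x10000 back, and pfn_to_page(pfn) then reduces to
 * section_mem_map + pfn for any pfn within the section.
 */
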
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

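/*
 * Illustrative arithmetic, assuming 32768 pages per section,
 * pageblock_order 9 and NR_PAGEBLOCK_BITS 3: 64 pageblocks * 3 bits
 * = 192 bits -> 24 bytes, already a multiple of sizeof(unsigned long)
 * on 64-bit, so each usemap costs 24 bytes (the figure quoted in the
 * sparse_init() comment below).
 */
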
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * The mem_map is allocated from large pages (e.g. 2MB on 64-bit
	 * x86) while a usemap is far smaller than a page (roughly 24
	 * bytes).  If the two were allocated in alternation, each tiny
	 * usemap would push the next 2MB-aligned mem_map into a new 2MB
	 * block, riddling a big system's memory with holes.  So allocate
	 * all the usemaps up front, letting the mem_maps then be
	 * allocated contiguously.
	 *
	 * Additionally, powerpc needs to call sparse_init_one_section()
	 * right after each sparse_early_mem_map_alloc(), so the
	 * usemap_map array must be ready before that loop starts.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * has already been logically offlined, so all of its pages
		 * are isolated from the page allocator.  If the memmap of
		 * the section being removed is placed on that same section,
		 * it must not be freed here: the page allocator could hand
		 * it out again, only for it to be removed physically soon
		 * after.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem: it is packed together with other
	 * usemaps on the section that holds the pgdat, so just leave it
	 * in place for now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the passed-in map was not consumed and must be
 * freed by the caller.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() does its own
	 * locking, and it may need to kmalloc().
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif