/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
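
/*
 * Under SPARSEMEM_EXTREME the table above is two-level: a root array of
 * pointers, each pointing (once allocated) at a block of
 * SECTIONS_PER_ROOT mem_section entries. Roots are populated lazily by
 * sparse_index_init() as memory is registered, so a sparsely populated
 * machine only pays for the roots that actually contain memory. In the
 * flat case every "root" holds exactly one entry directly in the static
 * array, so NR_SECTION_ROOTS == NR_MEM_SECTIONS there.
 */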

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kmalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kmalloc(array_size, GFP_KERNEL);
	} else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
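
/*
 * sparse_index_alloc() must work both at early boot, when only bootmem
 * is available, and at memory-hotplug time, when kmalloc_node() is
 * preferred for node locality (with a plain kmalloc() fallback for
 * nodes that have no memory of their own to allocate from).
 */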

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two concurrent callers from
	 * allocating the same root index twice.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
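
/*
 * Note that the lookup above is a linear search over all roots; it is
 * only used on slow paths such as section removal, so the cost is not
 * a concern.
 */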

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
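
/*
 * Sketch of the encoding, assuming SECTION_NID_SHIFT leaves the low
 * bits of section_mem_map free for the SECTION_* flags: for nid == 3
 * the early value is 3 << SECTION_NID_SHIFT, memory_present() below
 * ORs in SECTION_MARKED_PRESENT, and sparse_early_nid() recovers 3 by
 * shifting back down, discarding the flag bits.
 */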

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
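
/*
 * Typically, architecture setup code calls memory_present() once for
 * each physical memory range it discovers, e.g.
 *
 *	memory_present(nid, start_pfn, end_pfn);
 *
 * before sparse_init() runs.
 */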

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity (pfn - section_mem_map) returns the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}
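
/*
 * A worked example of the identity: let base = section_nr_to_pfn(pnum).
 * Encoding stores coded = mem_map - base, so for any pfn within the
 * section, coded + pfn == mem_map + (pfn - base), i.e. exactly the
 * struct page for that pfn. pfn_to_page() can therefore add an
 * absolute pfn straight to the coded pointer, with no per-section
 * offset arithmetic.
 */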

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
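
/*
 * A rough worked example (the numbers are configuration-dependent):
 * with 128MB sections, 2MB pageblocks and 3 pageblock flag bits, a
 * section has 64 pageblocks needing 192 bits, which rounds up to the
 * 24 bytes quoted in sparse_init()'s comment below.
 */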

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
	unsigned long section_nr;

	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	return alloc_bootmem_section(usemap_size(), section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before removing section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
	return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
	if (usemap)
		return usemap;

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap) {
		check_usemap_section_nr(nid, usemap);
		return usemap;
	}

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
			PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
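
/*
 * For scale (the exact figure depends on sizeof(struct page)): with
 * 128MB sections and 4KB pages a section covers 32768 pages, so at
 * roughly 56 bytes per struct page its mem_map is about 1.75MB, which
 * is why sparse_init() below cares how consecutive 2MB allocations
 * pack. alloc_remap() above gives the architecture a chance to supply
 * the mem_map from a special mapping first.
 */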

static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
		"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * Each mem_map wants a big-page-sized allocation (2MB on 64-bit
	 * x86) while each usemap is far smaller than a page (about 24
	 * bytes). If the two were allocated alternately, every small
	 * usemap would push the next 2MB-aligned mem_map into a fresh
	 * 2MB region, leaving a large system's memory full of holes.
	 * So allocate all the usemaps up front, letting the mem_maps
	 * then be allocated contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}
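
/*
 * Note the fallback order above: physically contiguous pages are tried
 * first (__GFP_NOWARN keeps the expected large-order failures quiet),
 * then vmalloc(). __kfree_section_memmap() below has to distinguish
 * the two cases when freeing.
 */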

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * has already been logically offlined, so all of its pages
		 * are isolated from the page allocator. If the section's
		 * memmap is placed in the section itself, it must not be
		 * freed: the page allocator could hand it out again, only
		 * for it to be removed physically soon after.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
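
/*
 * The "magic" read from _mapcount above is the bootmem type recorded
 * when the node's memmap and usemap pages were registered for hotplug
 * (the register_page_bootmem_info_node() path); NODE_INFO pages belong
 * to the pgdat and must never be freed from here.
 */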
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if the allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed with the other
	 * usemaps on the section that held the pgdat at boot time.
	 * Just leave it as is for now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <= 0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking is needed here: sparse_index_init() does its own
	 * locking, and it may sleep in a kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
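
/*
 * sparse_add_one_section() is invoked from the generic memory-hotplug
 * path (__add_pages() in mm/memory_hotplug.c), once per section being
 * added.
 */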

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif