/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
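
/*
 * For illustration (the real sizes are config dependent): with
 * MAX_NUMNODES <= 256 the table above costs one byte per section, so
 * a hypothetical machine with NR_MEM_SECTIONS == 4096 pays 4KB for
 * the ability to do
 *
 *	set_section_nid(pfn_to_section_nr(pfn), nid);	at boot
 *	nid = page_to_nid(page);			on every lookup
 */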
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index.  The allocation is done
	 * before taking the lock because it may sleep (kmalloc with
	 * GFP_KERNEL), which is not allowed under a spinlock.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
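
/*
 * Worked example with made-up numbers: if SECTIONS_PER_ROOT were 256
 * and ms pointed at index 3 of the root at mem_section[2], the loop
 * above breaks with root_nr == 2 and the function returns
 * 2 * 256 + 3 == 515.
 */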

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
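
/*
 * Example (SECTION_NID_SHIFT and the flag bits come from mmzone.h):
 * sparse_encode_early_nid(3) stores 3 << SECTION_NID_SHIFT, leaving
 * the low bits free for flags such as SECTION_MARKED_PRESENT;
 * sparse_early_nid() simply shifts the flags back out to recover 3.
 */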

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
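
/*
 * A minimal usage sketch (the iterator below is hypothetical; real
 * callers live in architecture setup code): every early memory range
 * should be reported before sparse_init() runs, e.g.
 *
 *	for_each_early_range(nid, start_pfn, end_pfn)
 *		memory_present(nid, start_pfn, end_pfn);
 */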

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
					    unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
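
/*
 * Worked example with assumed numbers: if PAGES_PER_SECTION were
 * 1 << 14 and two of the node's sections were present, this returns
 * 2 * 16384 * sizeof(struct page) bytes of prospective mem_map.
 */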

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps. While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute__((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
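
/*
 * The two helpers above invert each other:
 *
 *	sparse_decode_mem_map(sparse_encode_mem_map(map, pnum), pnum) == map
 *
 * Because the section's first pfn is subtracted out at encode time,
 * pfn_to_page() can later compute "decoded mem_map + pfn" directly
 * for any pfn inside section pnum.
 */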

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

__attribute__((weak)) __init
void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
{
	return NULL;
}

static unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
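
/*
 * Example arithmetic (SECTION_BLOCKFLAGS_BITS is config dependent):
 * were it 130 bits, roundup(130, 8) / 8 gives 17 bytes, which then
 * rounds up to 24 bytes on a 64-bit kernel, i.e. three unsigned longs
 * per section usemap.
 */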

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_high_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
		"some memory will not be available.\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		usemap = sparse_early_usemap_alloc(pnum);
		if (!usemap)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}
}
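
/*
 * sparse_init() is expected to run exactly once during early boot,
 * after all memory_present() calls; on most SPARSEMEM architectures
 * the call site is paging_init() or its equivalent.
 */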

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}
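
/*
 * Design note: trying alloc_pages() first yields a physically
 * contiguous mem_map when a large enough order is still free, while
 * the __GFP_NOWARN + vmalloc() fallback keeps section hotplug working
 * under fragmentation.  __kfree_section_memmap() below must therefore
 * check which allocator backed the mapping before freeing it.
 */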

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the newly allocated mem_map was not consumed and
 * is freed here before returning.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() does its own
	 * locking, and it may kmalloc(), so it must not be called
	 * under the pgdat resize lock taken below.
	 */
	sparse_index_init(section_nr, pgdat->node_id);
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	usemap = __kmalloc_section_usemap();

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	if (!usemap) {
		ret = -ENOMEM;
		goto out;
	}
	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0)
		__kfree_section_memmap(memmap, nr_pages);
	return ret;
}
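
/*
 * Usage sketch (an assumed caller, shown for illustration only): the
 * memory hotplug path adds one section at a time, roughly
 *
 *	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 *	if (ret < 0 && ret != -EEXIST)
 *		goto error;
 *
 * A return of 1 means the section's new mem_map is now live; on
 * failure the mem_map allocated above has already been freed.
 */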
#endif /* CONFIG_MEMORY_HOTPLUG */