/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>

#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <bios_ebda.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
bootmem_data_t node0_bdata;

/*
 * numa interface - we expect the numa architecture specific code to have
 * populated the following initialisation data:
 *
 * 1) node_online_map - the map of all nodes configured (online) in the system
 * 2) node_start_pfn - the starting page frame number for a node
 * 3) node_end_pfn - the ending page frame number for a node
 */
unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;

#ifdef CONFIG_DISCONTIGMEM
/*
 * 4) physnode_map - the mapping between a pfn and owning node
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at 256MB granularity: each element of the array represents
 * 256MB of memory and is marked with the id of the owning node.  So,
 * if the first gig is on node 0 and the second gig is on node 1,
 * physnode_map will contain:
 *
 *     physnode_map[0-3] = 0;
 *     physnode_map[4-7] = 1;
 *     physnode_map[8- ] = -1;
 */
s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
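
/*
 * The pfn-to-node lookup is then a single array index; pfn_to_nid()
 * in asm/mmzone.h is essentially:
 *
 *	int pfn_to_nid(unsigned long pfn)
 *	{
 *		return physnode_map[pfn / PAGES_PER_ELEMENT];
 *	}
 */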

void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	printk(KERN_INFO "Node: %d, start_pfn: %ld, end_pfn: %ld\n",
			nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");
	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
		printk("%ld ", pfn);
	}
	printk("\n");
}

unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long nr_pages = end_pfn - start_pfn;

	if (!nr_pages)
		return 0;

	return (nr_pages + 1) * sizeof(struct page);
}
#endif

extern unsigned long find_max_low_pfn(void);
extern void find_max_pfn(void);
extern void add_one_highpage_init(struct page *, int, int);

extern struct e820map e820;
extern unsigned long init_pg_tables_end;
extern unsigned long highend_pfn, highstart_pfn;
extern unsigned long max_low_pfn;
extern unsigned long totalram_pages;
extern unsigned long totalhigh_pages;

#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
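
/*
 * With 4KB pages and PTRS_PER_PTE == 1024 (non-PAE two-level paging),
 * LARGE_PAGE_BYTES is 1024 * 4096 = 4MB: the span of one pmd-mapped
 * large page.  (Under PAE, PTRS_PER_PTE == 512 and a large page is 2MB.)
 */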

unsigned long node_remap_start_pfn[MAX_NUMNODES];
unsigned long node_remap_size[MAX_NUMNODES];
unsigned long node_remap_offset[MAX_NUMNODES];
void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

void *node_remap_end_vaddr[MAX_NUMNODES];
void *node_remap_alloc_vaddr[MAX_NUMNODES];
static unsigned long kva_start_pfn;
static unsigned long kva_pages;

/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory map.
 */
int __init get_memcfg_numa_flat(void)
{
	printk("NUMA - single node, flat memory mode\n");

	/* Run the memory configuration and find the top of memory. */
	find_max_pfn();
	node_start_pfn[0] = 0;
	node_end_pfn[0] = max_pfn;
	memory_present(0, 0, max_pfn);

	/* Indicate there is one node available. */
	nodes_clear(node_online_map);
	node_set_online(0);
	return 1;
}
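
/*
 * get_memcfg_numa() (asm/mmzone.h) tries the platform-specific
 * configuration sources first and only falls back to the flat setup
 * above when none succeeds; roughly:
 *
 *	if (get_memcfg_numaq())		(CONFIG_X86_NUMAQ)
 *		return;
 *	if (get_memcfg_from_srat())	(CONFIG_ACPI_SRAT)
 *		return;
 *	get_memcfg_numa_flat();
 */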

/*
 * Find the highest page frame number we have available for the node
 */
static void __init find_max_pfn_node(int nid)
{
	if (node_end_pfn[nid] > max_pfn)
		node_end_pfn[nid] = max_pfn;
	/*
	 * if a user has given mem=XXXX, then we need to make sure
	 * that the node _starts_ before that, too, not just ends
	 */
	if (node_start_pfn[nid] > max_pfn)
		node_start_pfn[nid] = max_pfn;
	BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}

/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method.  For node zero take this from the bottom of memory, for
 * subsequent nodes place them at node_remap_start_vaddr which contains
 * node local data in physically node local memory.  See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
	if (nid && node_has_online_mem(nid))
		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
	else {
		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(min_low_pfn));
		min_low_pfn += PFN_UP(sizeof(pg_data_t));
	}
}

void *alloc_remap(int nid, unsigned long size)
{
	void *allocation = node_remap_alloc_vaddr[nid];

	size = ALIGN(size, L1_CACHE_BYTES);

	if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
		return NULL;

	node_remap_alloc_vaddr[nid] += size;
	memset(allocation, 0, size);

	return allocation;
}
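
/*
 * A sketch of the typical caller, alloc_node_mem_map() in
 * mm/page_alloc.c: the node-local remap window is preferred, with
 * bootmem as the fallback when the window is absent or exhausted:
 *
 *	map = alloc_remap(pgdat->node_id, size);
 *	if (!map)
 *		map = alloc_bootmem_node(pgdat, size);
 */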

void __init remap_numa_kva(void)
{
	void *vaddr;
	unsigned long pfn;
	int node;

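	/*
	 * Each set_pmd_pfn() call below installs one large-page mapping,
	 * covering PTRS_PER_PTE small pages at a time, which is why both
	 * the remap size and start pfn must be pmd-aligned (see
	 * calculate_numa_remap_pages()).
	 */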
	for_each_online_node(node) {
		for (pfn = 0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
			vaddr = node_remap_start_vaddr[node] + (pfn << PAGE_SHIFT);
			set_pmd_pfn((ulong) vaddr,
				node_remap_start_pfn[node] + pfn,
				PAGE_KERNEL_LARGE);
		}
	}
}

static unsigned long calculate_numa_remap_pages(void)
{
	int nid;
	unsigned long size, reserve_pages = 0;
	unsigned long pfn;

	for_each_online_node(nid) {
		unsigned long old_end_pfn = node_end_pfn[nid];

		/*
		 * The acpi/srat node info can show hot-add memory zones
		 * where memory could be added but is not currently present.
		 */
		if (node_start_pfn[nid] > max_pfn)
			continue;
		if (node_end_pfn[nid] > max_pfn)
			node_end_pfn[nid] = max_pfn;

		/* ensure the remap includes space for the pgdat. */
		size = node_remap_size[nid] + sizeof(pg_data_t);

		/* convert size to large (pmd size) pages, rounding up */
		size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
		/* now the roundup is correct, convert to PAGE_SIZE pages */
		size = size * PTRS_PER_PTE;
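		/*
		 * Worked example (4KB pages, non-PAE): a node needing 9MB
		 * of mem_map plus its pgdat rounds up to 3 large pages,
		 * i.e. size = 3 * 1024 = 3072 small pages.
		 */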

		/*
		 * Validate the region we are allocating only contains valid
		 * pages.
		 */
		for (pfn = node_end_pfn[nid] - size;
		     pfn < node_end_pfn[nid]; pfn++)
			if (!page_is_ram(pfn))
				break;

		if (pfn != node_end_pfn[nid])
			size = 0;

		printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
				size, nid);
		node_remap_size[nid] = size;
		node_remap_offset[nid] = reserve_pages;
		reserve_pages += size;
		printk("Shrinking node %d from %ld pages to %ld pages\n",
			nid, node_end_pfn[nid], node_end_pfn[nid] - size);

		if (node_end_pfn[nid] & (PTRS_PER_PTE-1)) {
			/*
			 * Align node_end_pfn[] and node_remap_start_pfn[] to
			 * the pmd boundary.  remap_numa_kva() will barf
			 * otherwise.
			 */
			printk("Shrinking node %d further by %ld pages for proper alignment\n",
				nid, node_end_pfn[nid] & (PTRS_PER_PTE-1));
			size += node_end_pfn[nid] & (PTRS_PER_PTE-1);
		}

		node_end_pfn[nid] -= size;
		node_remap_start_pfn[nid] = node_end_pfn[nid];
		shrink_active_range(nid, old_end_pfn, node_end_pfn[nid]);
	}
	printk("Reserving total of %ld pages for numa KVA remap\n",
			reserve_pages);
	return reserve_pages;
}

extern void setup_bootmem_allocator(void);
unsigned long __init setup_memory(void)
{
	int nid;
	unsigned long system_start_pfn, system_max_low_pfn;

	/*
	 * When mapping a NUMA machine we allocate the node_mem_map arrays
	 * from node local memory.  They are then mapped directly into KVA
	 * between zone normal and vmalloc space.  Calculate the size of
	 * this space and use it to adjust the boundary between ZONE_NORMAL
	 * and ZONE_HIGHMEM.
	 */
	find_max_pfn();
	get_memcfg_numa();

	kva_pages = calculate_numa_remap_pages();

	/* partially used pages are not usable - thus round upwards */
	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);

	kva_start_pfn = find_max_low_pfn() - kva_pages;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Numa kva area is below the initrd */
	if (LOADER_TYPE && INITRD_START)
		kva_start_pfn = PFN_DOWN(INITRD_START) - kva_pages;
#endif
	kva_start_pfn -= kva_start_pfn & (PTRS_PER_PTE-1);
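	/*
	 * The subtraction above rounds kva_start_pfn down to a pmd
	 * boundary so that remap_numa_kva() can map the whole area with
	 * large pages.
	 */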

	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
		kva_start_pfn, max_low_pfn);
	printk("max_pfn = %ld\n", max_pfn);
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > system_max_low_pfn)
		highstart_pfn = system_max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
	       pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	num_physpages = system_max_low_pfn;
	high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
	       pages_to_mb(system_max_low_pfn));
	printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
		min_low_pfn, max_low_pfn, highstart_pfn);

	printk("Low memory ends at vaddr %08lx\n",
		(ulong) pfn_to_kaddr(max_low_pfn));
	for_each_online_node(nid) {
		node_remap_start_vaddr[nid] = pfn_to_kaddr(
				kva_start_pfn + node_remap_offset[nid]);
		/* Init the node remap allocator */
		node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
			(node_remap_size[nid] * PAGE_SIZE);
		node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
			ALIGN(sizeof(pg_data_t), PAGE_SIZE);
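		/*
		 * The pgdat itself occupies the start of the window (see
		 * allocate_pgdat()), so the remap allocator begins handing
		 * out memory just past it, page-aligned.
		 */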

		allocate_pgdat(nid);
		printk("node %d will remap to vaddr %08lx - %08lx\n", nid,
			(ulong) node_remap_start_vaddr[nid],
			(ulong) pfn_to_kaddr(kva_start_pfn
				+ node_remap_offset[nid] + node_remap_size[nid]));
	}
	printk("High memory starts at vaddr %08lx\n",
		(ulong) pfn_to_kaddr(highstart_pfn));
	for_each_online_node(nid)
		find_max_pfn_node(nid);

	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
	NODE_DATA(0)->bdata = &node0_bdata;
	setup_bootmem_allocator();
	return max_low_pfn;
}

void __init numa_kva_reserve(void)
{
	reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages));
}

void __init zone_sizes_init(void)
{
	int nid;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
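	/*
	 * On i386, MAX_DMA_ADDRESS is PAGE_OFFSET + 16MB, so the ZONE_DMA
	 * boundary above lands at the ISA DMA limit (pfn 4096 with 4KB
	 * pages).
	 */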

	/* If SRAT has not registered memory, register it now */
	if (find_max_pfn_with_active_regions() == 0) {
		for_each_online_node(nid) {
			if (node_has_online_mem(nid))
				add_active_range(nid, node_start_pfn[nid],
						 node_end_pfn[nid]);
		}
	}

	free_area_init_nodes(max_zone_pfns);
}

void __init set_highmem_pages_init(int bad_ppro)
{
#ifdef CONFIG_HIGHMEM
	struct zone *zone;
	struct page *page;

	for_each_zone(zone) {
		unsigned long node_pfn, zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		printk("Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, zone_to_nid(zone),
			zone_start_pfn, zone_end_pfn);

		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
			if (!pfn_valid(node_pfn))
				continue;
			page = pfn_to_page(node_pfn);
			add_one_highpage_init(page, node_pfn, bad_ppro);
		}
	}
	totalram_pages += totalhigh_pages;
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int paddr_to_nid(u64 addr)
{
	int nid;
	unsigned long pfn = PFN_DOWN(addr);

	for_each_node(nid)
		if (node_start_pfn[nid] <= pfn &&
		    pfn < node_end_pfn[nid])
			return nid;

	return -1;
}

/*
 * This function is used to ask for a node id BEFORE the memmap and
 * mem_sections are initialized (pfn_to_nid() can't be used yet).
 * If _PXM is not defined in the ACPI DSDT, the node id must be found
 * this way.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	int nid = paddr_to_nid(addr);
	return (nid >= 0) ? nid : 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif