/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *
 * 2002/08/07 Erich Focht <efocht@ess.nec.de>
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/mmzone.h>
#include <asm/numa.h>

/*
 * The following structures are usually initialized by ACPI or
 * similar mechanisms and describe the NUMA characteristics of the machine.
 */
int num_node_memblks;
struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
struct node_cpuid_s node_cpuid[NR_CPUS] =
	{ [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };

/*
 * This is a matrix with "distances" between nodes; they should be
 * proportional to the memory access latency ratios.
 */
u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
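/*
 * Editorial note (assumption, not part of the original file): on ia64,
 * node_distance(from, to) is expected to read this table as
 * numa_slit[from * MAX_NUMNODES + to].  Per the ACPI SLIT convention a
 * value of 10 means local access and larger values scale with latency,
 * so numa_slit[0 * MAX_NUMNODES + 1] == 20 would mean node 1 is roughly
 * twice as far from node 0 as node 0 is from itself.
 */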

/* Identify which cnode a physical address resides on */
int
paddr_to_nid(unsigned long paddr)
{
	int i;

	for (i = 0; i < num_node_memblks; i++)
		if (paddr >= node_memblk[i].start_paddr &&
		    paddr < node_memblk[i].start_paddr + node_memblk[i].size)
			break;

	/*
	 * Return the matching node, -1 if no memory block covers the
	 * address, or node 0 if no memory blocks have been registered yet.
	 */
	return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
}
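/*
 * Illustrative usage (editorial addition, not in the original file):
 * callers treat a negative return as "no node found" and typically fall
 * back to node 0, e.g.:
 *
 *	int nid = paddr_to_nid(addr);
 *	if (nid < 0)
 *		nid = 0;
 *
 * memory_add_physaddr_to_nid() below follows exactly this pattern.
 */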

#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
/*
 * Because the memory map may contain holes, evaluate on section limits.
 * If the section of memory exists, then return the node where the section
 * resides.  Otherwise return -1 and let the caller fall back to a default
 * node.  This is used by SPARSEMEM to allocate the SPARSEMEM sectionmap on
 * the NUMA node where the section resides.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;

	/* Fast path: the most recently matched section range is cached. */
	if (section >= state->last_start && section < state->last_end)
		return state->last_nid;

	for (i = 0; i < num_node_memblks; i++) {
		/* Round the memory block out to whole section boundaries. */
		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
		if (section >= ssec && section < esec) {
			state->last_start = ssec;
			state->last_end = esec;
			state->last_nid = node_memblk[i].nid;
			return node_memblk[i].nid;
		}
	}

	return -1;
}
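/*
 * Editorial note (assumption, not part of the original file): the generic
 * early_pfn_to_nid() wrapper in mm/page_alloc.c is expected to consume the
 * -1 return above and substitute a default node, roughly:
 *
 *	nid = __early_pfn_to_nid(pfn, state);
 *	if (nid < 0)
 *		nid = first_online_node;
 *
 * so this function only has to report "unknown" rather than pick a default.
 */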

/* Remove the given CPU's mapping to its NUMA node. */
void numa_clear_node(int cpu)
{
	unmap_cpu_from_node(cpu, NUMA_NO_NODE);
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * SRAT information is stored in node_memblk[], so it can be reused at
 * memory hot-add time if necessary.
 */

int memory_add_physaddr_to_nid(u64 addr)
{
	int nid = paddr_to_nid(addr);
	if (nid < 0)
		return 0;
	return nid;
}

EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif	/* CONFIG_MEMORY_HOTPLUG */
#endif	/* CONFIG_SPARSEMEM && CONFIG_NUMA */