/*
 * NUMA emulation
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <asm/dma.h>

#include "numa_internal.h"

static int emu_nid_to_phys[MAX_NUMNODES];
static char *emu_cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
	emu_cmdline = str;
}
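
/*
 * Illustrative numa=fake usage, as parsed in numa_emulation() below:
 *
 *   numa=fake=8	- split system RAM into 8 interleaved fake nodes
 *   numa=fake=512M	- split system RAM into 512MiB fake nodes
 *
 * Anything following a ':' is handed to get_option() as an emulated
 * distance table by the distance-transform loop in numa_emulation().
 */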

static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].nid == nid)
			return i;
	return -ENOENT;
}

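/*
 * mem_hole_size() below counts only pages fully contained in the range.
 * Worked example (assumes 4KiB pages): for start = 0x1800 and
 * end = 0x4800, PFN_UP(start) = 2 and PFN_DOWN(end) = 4, so absent
 * pages are counted over pfns [2, 4) and the partial pages at either
 * edge are ignored.
 */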
static u64 __init mem_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (start_pfn < end_pfn)
		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
	return 0;
}

/*
 * Set up an emulated memblk of @size bytes for @nid, carved from the
 * front of physical block @phys_blk in @pi.  Returns 0 on success,
 * -errno otherwise.
 */
static int __init emu_setup_memblk(struct numa_meminfo *ei,
				   struct numa_meminfo *pi,
				   int nid, int phys_blk, u64 size)
{
	struct numa_memblk *eb = &ei->blk[ei->nr_blks];
	struct numa_memblk *pb = &pi->blk[phys_blk];

	if (ei->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: Too many emulated memblks, failing emulation\n");
		return -EINVAL;
	}

	ei->nr_blks++;
	eb->start = pb->start;
	eb->end = pb->start + size;
	eb->nid = nid;

	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
		emu_nid_to_phys[nid] = pb->nid;

	pb->start += size;
	if (pb->start >= pb->end) {
		WARN_ON_ONCE(pb->start > pb->end);
		numa_remove_memblk_from(phys_blk, pi);
	}

	pr_info("Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
		nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
	return 0;
}

/*
 * Set up @nr_nodes fake nodes interleaved over the physical nodes
 * covering the range from @addr to @max_addr.
 *
 * Returns zero on success or negative on error.
 */
static int __init split_nodes_interleave(struct numa_meminfo *ei,
					 struct numa_meminfo *pi,
					 u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = numa_nodes_parsed;
	u64 size;
	int big;
	int nid = 0;
	int i, ret;

	if (nr_nodes <= 0)
		return -1;
	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	/*
	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do
	 * the division in ulong number of pages and convert back.
	 */
	size = max_addr - addr - mem_hole_size(addr, max_addr);
	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);

	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  NUMA emulation disabled.\n");
		return -1;
	}
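
	/*
	 * Worked example (illustrative; assumes FAKE_NODE_MIN_SIZE is
	 * 32MiB - check the arch headers for the real value): with
	 * 2248MiB of usable memory and nr_nodes = 8, size starts at
	 * 281MiB per node.  The masking pools the 25MiB per-node
	 * remainder, so big = (25MiB * 8) / 32MiB = 6 nodes each gain
	 * one extra 32MiB chunk: six 288MiB nodes and two 256MiB nodes.
	 */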

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
			u64 start, limit, end;
			int phys_blk;

			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}
			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;
			end = start + size;

			if (nid < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - start - mem_hole_size(start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > limit) {
					end = limit;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (limit - end - mem_hole_size(end, limit) < size)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
					       phys_blk,
					       min(end, limit) - start);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - mem_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}

/*
 * Set up fake nodes of `size' interleaved over the physical nodes
 * covering the range from `addr' to `max_addr'.
 *
 * Returns zero on success or negative on error.
 */
static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
					      struct numa_meminfo *pi,
					      u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = numa_nodes_parsed;
	u64 min_size;
	int nid = 0;
	int i, ret;

	if (!size)
		return -1;
	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small.  This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
						FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size &= FAKE_NODE_MIN_HASH_MASK;
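
	/*
	 * Worked example (illustrative; assumes MAX_NUMNODES == 64 and
	 * FAKE_NODE_MIN_SIZE == 32MiB): with 4608MiB of usable memory,
	 * min_size starts at 72MiB, is rounded up to the next 32MiB
	 * multiple (96MiB), and a request such as numa=fake=64M would
	 * be raised to 96MiB before masking.
	 */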

	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
			u64 start, limit, end;
			int phys_blk;

			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}
			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;

			end = find_end_of_node(start, limit, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (limit - end - mem_hole_size(end, limit) < size)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
					       phys_blk,
					       min(end, limit) - start);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

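/*
 * Scan emu_nid_to_phys[] for the highest emulated nid in use and, via
 * @dfl_phys_nid, report the first mapped physical nid, which callers
 * use as the fallback for unmapped emulated nodes.
 */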
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
{
	int i, max_emu_nid = 0;

	*dfl_phys_nid = NUMA_NO_NODE;
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
		if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
			max_emu_nid = i;
			if (*dfl_phys_nid == NUMA_NO_NODE)
				*dfl_phys_nid = emu_nid_to_phys[i];
		}
	}

	return max_emu_nid;
}

/**
 * numa_emulation - Emulate NUMA nodes
 * @numa_meminfo: NUMA configuration to massage
 * @numa_dist_cnt: The size of the physical NUMA distance table
 *
 * Emulate NUMA nodes according to the numa=fake kernel parameter.
 * @numa_meminfo contains the physical memory configuration and is modified
 * to reflect the emulated configuration on success.  @numa_dist_cnt is
 * used to determine the size of the physical distance table.
 *
 * On success, the following modifications are made.
 *
 * - @numa_meminfo is updated to reflect the emulated nodes.
 *
 * - __apicid_to_node[] is updated such that APIC IDs are mapped to the
 *   emulated nodes.
 *
 * - The NUMA distance table is rebuilt to represent distances between
 *   emulated nodes.  The distances are derived from how the emulated nodes
 *   are mapped to physical nodes, so they match the actual physical
 *   distances.
 *
 * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical
 *   nodes.  This is used by numa_add_cpu() and numa_remove_cpu().
 *
 * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with
 * the identity mapping and no other modification is made.
 */
void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
{
	static struct numa_meminfo ei __initdata;
	static struct numa_meminfo pi __initdata;
	const u64 max_addr = PFN_PHYS(max_pfn);
	u8 *phys_dist = NULL;
	size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
	int max_emu_nid, dfl_phys_nid;
	int i, j, ret;

	if (!emu_cmdline)
		goto no_emu;

	memset(&ei, 0, sizeof(ei));
	pi = *numa_meminfo;

	for (i = 0; i < MAX_NUMNODES; i++)
		emu_nid_to_phys[i] = NUMA_NO_NODE;

	/*
	 * If the numa=fake command-line contains a 'M' or 'G', it represents
	 * the fixed node size.  Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
		u64 size;

		size = memparse(emu_cmdline, &emu_cmdline);
		ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
		ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n);
	}
	if (*emu_cmdline == ':')
		emu_cmdline++;

	if (ret < 0)
		goto no_emu;

	if (numa_cleanup_meminfo(&ei) < 0) {
		pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
		goto no_emu;
	}

	/* copy the physical distance table */
	if (numa_dist_cnt) {
		u64 phys;

		phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
					      phys_size, PAGE_SIZE);
		if (!phys) {
			pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
			goto no_emu;
		}
		memblock_reserve(phys, phys_size);
		phys_dist = __va(phys);

		for (i = 0; i < numa_dist_cnt; i++)
			for (j = 0; j < numa_dist_cnt; j++)
				phys_dist[i * numa_dist_cnt + j] =
					node_distance(i, j);
	}

	/*
	 * Determine the max emulated nid and the default phys nid to use
	 * for unmapped nodes.
	 */
	max_emu_nid = setup_emu2phys_nid(&dfl_phys_nid);

	/* commit */
	*numa_meminfo = ei;

	/* Make sure numa_nodes_parsed only contains emulated nodes */
	nodes_clear(numa_nodes_parsed);
	for (i = 0; i < ARRAY_SIZE(ei.blk); i++)
		if (ei.blk[i].start != ei.blk[i].end &&
		    ei.blk[i].nid != NUMA_NO_NODE)
			node_set(ei.blk[i].nid, numa_nodes_parsed);

	/*
	 * Transform __apicid_to_node table to use emulated nids by
	 * reverse-mapping phys_nid.  The maps should always exist but fall
	 * back to zero just in case.
	 */
	for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
		if (__apicid_to_node[i] == NUMA_NO_NODE)
			continue;
		for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
			if (__apicid_to_node[i] == emu_nid_to_phys[j])
				break;
		__apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
	}

	/* make sure all emulated nodes are mapped to a physical node */
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
			emu_nid_to_phys[i] = dfl_phys_nid;

	/* transform distance table */
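	/*
	 * Any remainder of the numa=fake option after the ':' supplies
	 * emulated distances, consumed row-major by get_option() below -
	 * e.g. something like "numa=fake=2:10,20,20,10," (illustrative;
	 * get_option() returns 2 only for a value followed by a comma,
	 * so an entry without a trailing comma falls back to the
	 * physical or default distance).
	 */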
	numa_reset_distance();
	for (i = 0; i < max_emu_nid + 1; i++) {
		for (j = 0; j < max_emu_nid + 1; j++) {
			int physi = emu_nid_to_phys[i];
			int physj = emu_nid_to_phys[j];
			int dist;

			if (get_option(&emu_cmdline, &dist) == 2)
				;
			else if (physi >= numa_dist_cnt || physj >= numa_dist_cnt)
				dist = physi == physj ?
					LOCAL_DISTANCE : REMOTE_DISTANCE;
			else
				dist = phys_dist[physi * numa_dist_cnt + physj];

			numa_set_distance(i, j, dist);
		}
	}

	/* free the copied physical distance table */
	if (phys_dist)
		memblock_free(__pa(phys_dist), phys_size);
	return;

no_emu:
	/* No emulation.  Build identity emu_nid_to_phys[] for numa_add_cpu() */
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
		emu_nid_to_phys[i] = i;
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS
void numa_add_cpu(int cpu)
{
	int physnid, nid;

	nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

	physnid = emu_nid_to_phys[nid];

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid)
		if (emu_nid_to_phys[nid] == physnid)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}

void numa_remove_cpu(int cpu)
{
	int i;

	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
static void numa_set_cpumask(int cpu, bool enable)
{
	int nid, physnid;

	nid = early_cpu_to_node(cpu);
	if (nid == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}

	physnid = emu_nid_to_phys[nid];

	for_each_online_node(nid) {
		if (emu_nid_to_phys[nid] != physnid)
			continue;

		debug_cpumask_set_cpu(cpu, nid, enable);
	}
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */