// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

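/*
 * The "node" subsystem backs /sys/devices/system/node/; each online NUMA
 * node is registered below it as nodeN by register_node().
 */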
static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
	ssize_t n;
	cpumask_var_t mask;
	struct node *node_dev = to_node(dev);

	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_to_pagebuf(list, buf, mask);
	free_cpumask_var(mask);

	return n;
}

static inline ssize_t node_read_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, false, buf);
}
static inline ssize_t node_read_cpulist(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, true, buf);
}

static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);

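/*
 * Illustrative reads (for a hypothetical node whose CPUs 0-3 are online):
 * "cpumap" returns a hex mask such as "f", while "cpulist" returns the
 * range form "0-3".
 */
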
/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev:	Device for this memory access class
 * @list_node:	List element in the node's access list
 * @access:	The access class rank
 * @hmem_attrs: Heterogeneous memory performance attributes
 */
struct node_access_nodes {
	struct device		dev;
	struct list_head	list_node;
	unsigned		access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs	hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name	= "initiators",
	.attrs	= node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name	= "targets",
	.attrs	= node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

static struct node_access_nodes *node_init_node_access(struct node *node,
						       unsigned access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

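/*
 * Each access class registered above shows up as
 * /sys/devices/system/node/nodeN/accessM/, holding "initiators" and
 * "targets" groups that start out empty and are filled with symlinks
 * and attributes later.
 */
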
#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(name)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sprintf(buf, "%u\n", to_access_nodes(dev)->hmem_attrs.name); \
}									\
static DEVICE_ATTR_RO(name);

ACCESS_ATTR(read_bandwidth)
ACCESS_ATTR(read_latency)
ACCESS_ATTR(write_bandwidth)
ACCESS_ATTR(write_latency)

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}

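/*
 * For illustration, the attributes land in the access class's
 * "initiators" group, e.g.
 * /sys/devices/system/node/node1/access0/initiators/read_bandwidth.
 */
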
/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev:	Device representing the cache level
 * @node:	List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sprintf(buf, fmt "\n", to_cache_info(dev)->cache_attrs.name); \
}									\
DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);

	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto free_dev;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
free_name:
	kfree_const(dev->kobj.name);
free_dev:
	kfree(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				"attempt to add duplicate cache level:%d\n",
				cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto free_cache;

	info->cache_attrs = *cache_attrs;
	if (device_register(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			cache_attrs->level);
		goto free_name;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
free_name:
	kfree_const(dev->kobj.name);
free_cache:
	kfree(info);
}

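/*
 * Resulting layout, for illustration: one indexL directory per cache
 * level L, e.g.
 * /sys/devices/system/node/nodeN/memory_side_cache/index1/{size,
 * line_size,indexing,write_policy}.
 */
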
static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

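/*
 * nodeN/meminfo mirrors /proc/meminfo with per-node counters, each line
 * prefixed with "Node %d" (e.g. "Node 0 MemTotal:  ... kB").
 */
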
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int n;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE);
	sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE);
	n = sprintf(buf,
		    "Node %d MemTotal:       %8lu kB\n"
		    "Node %d MemFree:        %8lu kB\n"
		    "Node %d MemUsed:        %8lu kB\n"
		    "Node %d Active:         %8lu kB\n"
		    "Node %d Inactive:       %8lu kB\n"
		    "Node %d Active(anon):   %8lu kB\n"
		    "Node %d Inactive(anon): %8lu kB\n"
		    "Node %d Active(file):   %8lu kB\n"
		    "Node %d Inactive(file): %8lu kB\n"
		    "Node %d Unevictable:    %8lu kB\n"
		    "Node %d Mlocked:        %8lu kB\n",
		    nid, K(i.totalram),
		    nid, K(i.freeram),
		    nid, K(i.totalram - i.freeram),
		    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
			   node_page_state(pgdat, NR_ACTIVE_FILE)),
		    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
			   node_page_state(pgdat, NR_INACTIVE_FILE)),
		    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
		    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
		    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
		    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
		    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
		    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	n += sprintf(buf + n,
		     "Node %d HighTotal:      %8lu kB\n"
		     "Node %d HighFree:       %8lu kB\n"
		     "Node %d LowTotal:       %8lu kB\n"
		     "Node %d LowFree:        %8lu kB\n",
		     nid, K(i.totalhigh),
		     nid, K(i.freehigh),
		     nid, K(i.totalram - i.totalhigh),
		     nid, K(i.freeram - i.freehigh));
#endif
	n += sprintf(buf + n,
		     "Node %d Dirty:          %8lu kB\n"
		     "Node %d Writeback:      %8lu kB\n"
		     "Node %d FilePages:      %8lu kB\n"
		     "Node %d Mapped:         %8lu kB\n"
		     "Node %d AnonPages:      %8lu kB\n"
		     "Node %d Shmem:          %8lu kB\n"
		     "Node %d KernelStack:    %8lu kB\n"
		     "Node %d PageTables:     %8lu kB\n"
		     "Node %d NFS_Unstable:   %8lu kB\n"
		     "Node %d Bounce:         %8lu kB\n"
		     "Node %d WritebackTmp:   %8lu kB\n"
		     "Node %d KReclaimable:   %8lu kB\n"
		     "Node %d Slab:           %8lu kB\n"
		     "Node %d SReclaimable:   %8lu kB\n"
		     "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		     "Node %d AnonHugePages:  %8lu kB\n"
		     "Node %d ShmemHugePages: %8lu kB\n"
		     "Node %d ShmemPmdMapped: %8lu kB\n"
#endif
		     ,
		     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
		     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
		     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
		     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
		     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
		     nid, K(i.sharedram),
		     nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
		     nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
		     nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
		     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
		     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
		     nid, K(sreclaimable +
			    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
		     nid, K(sreclaimable + sunreclaimable),
		     nid, K(sreclaimable),
		     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		     ,
		     nid, K(node_page_state(pgdat, NR_ANON_THPS) *
			    HPAGE_PMD_NR),
		     nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
			    HPAGE_PMD_NR),
		     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
			    HPAGE_PMD_NR)
#endif
		    );
	n += hugetlb_report_node_meminfo(nid, buf + n);
	return n;
}

#undef K
static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);

static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sprintf(buf,
		       "numa_hit %lu\n"
		       "numa_miss %lu\n"
		       "numa_foreign %lu\n"
		       "interleave_hit %lu\n"
		       "local_node %lu\n"
		       "other_node %lu\n",
		       sum_zone_numa_state(dev->id, NUMA_HIT),
		       sum_zone_numa_state(dev->id, NUMA_MISS),
		       sum_zone_numa_state(dev->id, NUMA_FOREIGN),
		       sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
		       sum_zone_numa_state(dev->id, NUMA_LOCAL),
		       sum_zone_numa_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);

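/*
 * vmstat_text[] is laid out as zone stats, then NUMA event counters,
 * then node stats, which is why the lookups below use cumulative
 * offsets into the array.
 */
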
static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int n = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
			     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n",
			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
			     sum_zone_numa_state(nid, i));
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n",
			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
			     NR_VM_NUMA_STAT_ITEMS],
			     node_page_state(pgdat, i));

	return n;
}
static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i)
		len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));

	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);

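/*
 * Illustrative read of nodeN/distance on a hypothetical two-node box:
 * "10 21" -- one entry per online node, with values supplied by
 * firmware (e.g. the ACPI SLIT).
 */
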
static struct attribute *node_dev_attrs[] = {
	&dev_attr_cpumap.attr,
	&dev_attr_cpulist.attr,
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};
ATTRIBUTE_GROUPS(node_dev);

#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory.  It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node = doregister;
	__hugetlb_unregister_node = unregister;
}
#else
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif

static void node_device_release(struct device *dev)
{
	struct node *node = to_node(dev);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
	/*
	 * The work is scheduled only when a memory section is
	 * onlined/offlined on this node.  By the time we get here,
	 * all the memory on this node has been offlined, so no new
	 * work can be queued on node->node_work.
	 *
	 * The work item uses node->node_work, so it must be flushed
	 * before the node is freed.
	 */
	flush_work(&node->node_work);
#endif
	kfree(node);
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num: Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error)
		put_device(&node->dev);
	else {
		hugetlb_register_node(node);

		compaction_register_node(node);
	}
	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}

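/*
 * The links are bidirectional: nodeN gains a cpuX symlink and the cpuX
 * device gains a nodeN symlink, so either side can be walked from sysfs.
 */
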
/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	CPU node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}

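/*
 * For illustration, linking mem_nid 1 under cpu_nid 0 at access class 0
 * creates node0/access0/targets/node1 and node1/access0/initiators/node0.
 */
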
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __ref get_nid_for_pfn(unsigned long pfn)
{
	if (!pfn_valid_within(pfn))
		return -1;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

/* register memory section under specified node if it spans that node */
static int register_mem_sect_under_node(struct memory_block *mem_blk,
					void *arg)
{
	int ret, nid = *(int *)arg;
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	mem_blk->nid = nid;

	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
	sect_end_pfn += PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int page_nid;

		/*
		 * A memory block can contain several absent sections at
		 * its start; skip the pfn range covered by an absent
		 * section.
		 */
		if (!pfn_present(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We need to check if page belongs to nid only for the boot
		 * case, during hotplug we know that all pages in the memory
		 * block belong to the same node.
		 */
		if (system_state == SYSTEM_BOOTING) {
			page_nid = get_nid_for_pfn(pfn);
			if (page_nid < 0)
				continue;
			if (page_nid != nid)
				continue;
		}
		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
					       &mem_blk->dev.kobj,
					       kobject_name(&mem_blk->dev.kobj));
		if (ret)
			return ret;

		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
						&node_devices[nid]->dev.kobj,
						kobject_name(&node_devices[nid]->dev.kobj));
	}
	/* mem section does not span the specified node */
	return 0;
}

/*
 * Unregister a memory block device under all nodes that it spans.
 * Has to be called with mem_sysfs_mutex held (due to unlinked_nodes).
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	unsigned long pfn, sect_start_pfn, sect_end_pfn;
	static nodemask_t unlinked_nodes;

	nodes_clear(unlinked_nodes);
	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int nid;

		nid = get_nid_for_pfn(pfn);
		if (nid < 0)
			continue;
		if (!node_online(nid))
			continue;
		if (node_test_and_set(nid, unlinked_nodes))
			continue;
		sysfs_remove_link(&node_devices[nid]->dev.kobj,
				  kobject_name(&mem_blk->dev.kobj));
		sysfs_remove_link(&mem_blk->dev.kobj,
				  kobject_name(&node_devices[nid]->dev.kobj));
	}
}

int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
{
	return walk_memory_blocks(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
				  register_mem_sect_under_node);
}

#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transitions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now.  hugetlb_register_node() already checks this,
	 * so we just try to register the attributes.  If that fails, the
	 * node has transitioned to memoryless, and we try to unregister
	 * the attributes instead.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}

static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid]->node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
    !defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif

int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	/* initialize work queue for memory hot plug */
	init_node_hugetlb_work(nid);
	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

static ssize_t print_nodes_state(enum node_states state, char *buf)
{
	int n;

	n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
		      nodemask_pr_args(&node_states[state]));
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return print_nodes_state(na->state, buf);
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
};

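/*
 * These attributes appear at the subsystem root, e.g.
 * /sys/devices/system/node/{possible,online,has_normal_memory,has_memory,
 * has_cpu}; each reads back a node list rendered by "%*pbl", such as
 * "0-1" (illustrative).
 */
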
static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
static int __init register_node_type(void)
{
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		static struct notifier_block node_memory_callback_nb = {
			.notifier_call = node_memory_callback,
			.priority = NODE_CALLBACK_PRI,
		};
		register_hotmemory_notifier(&node_memory_callback_nb);
	}

	/*
	 * Note: we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);