/*
 * drivers/base/memory.c - basic Memory class support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */
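/*
 * Rough sketch of the resulting sysfs layout (illustrative; the exact set
 * of class-level files depends on the config options handled below, and
 * block directories are named after their section number):
 *
 *	/sys/devices/system/memory/
 *	|-- block_size_bytes
 *	|-- probe			(CONFIG_ARCH_MEMORY_PROBE)
 *	|-- soft_offline_page		(CONFIG_MEMORY_FAILURE)
 *	|-- hard_offline_page		(CONFIG_MEMORY_FAILURE)
 *	|-- memory0/
 *	|   |-- phys_index
 *	|   |-- phys_device
 *	|   |-- removable
 *	|   `-- state
 *	|-- memory1/
 *	`-- ...
 */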

#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

#define MEMORY_CLASS_NAME "memory"

static struct sysdev_class memory_sysdev_class = {
	.name = MEMORY_CLASS_NAME,
};

static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
{
	return MEMORY_CLASS_NAME;
}

static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uevent_env *env)
{
	int retval = 0;

	return retval;
}

static struct kset_uevent_ops memory_uevent_ops = {
	.name = memory_uevent_name,
	.uevent = memory_uevent,
};

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
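/*
 * Illustrative sketch (not part of this file): a subsystem interested in
 * memory hotplug transitions hooks the chain above from its own code.
 * The callback and notifier_block names below are made up for the example;
 * the MEM_* actions and the struct memory_notify argument come from
 * <linux/memory.h>.
 *
 *	static int example_memory_callback(struct notifier_block *self,
 *					   unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		switch (action) {
 *		case MEM_GOING_OFFLINE:
 *			(pages mn->start_pfn .. mn->start_pfn + mn->nr_pages
 *			 are about to go away; returning notifier_from_errno()
 *			 with an error here vetoes the offline)
 *			break;
 *		case MEM_ONLINE:
 *		case MEM_OFFLINE:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_memory_nb = {
 *		.notifier_call = example_memory_callback,
 *	};
 *
 *	register_memory_notifier(&example_memory_nb);
 */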

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory, struct mem_section *section)
{
	int error;

	memory->sysdev.cls = &memory_sysdev_class;
	memory->sysdev.id = __section_nr(section);

	error = sysdev_register(&memory->sysdev);
	return error;
}

static void
unregister_memory(struct memory_block *memory, struct mem_section *section)
{
	BUG_ON(memory->sysdev.cls != &memory_sysdev_class);
	BUG_ON(memory->sysdev.id != __section_nr(section));

	/* drop the ref. we got in remove_memory_block() */
	kobject_put(&memory->sysdev.kobj);
	sysdev_unregister(&memory->sysdev);
}
/*
 * Show the physical section index that this memory block
 * corresponds to.
 */

static ssize_t show_mem_phys_index(struct sys_device *dev,
			struct sysdev_attribute *attr, char *buf)
{
	struct memory_block *mem =
		container_of(dev, struct memory_block, sysdev);
	return sprintf(buf, "%08lx\n", mem->phys_index);
}

/*
 * Show whether the section of memory is likely to be hot-removable
 */
static ssize_t show_mem_removable(struct sys_device *dev,
			struct sysdev_attribute *attr, char *buf)
{
	unsigned long start_pfn;
	int ret;
	struct memory_block *mem =
		container_of(dev, struct memory_block, sysdev);

	start_pfn = section_nr_to_pfn(mem->phys_index);
	ret = is_mem_section_removable(start_pfn, PAGES_PER_SECTION);
	return sprintf(buf, "%d\n", ret);
}
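/*
 * Example (illustrative; "memory42" stands in for any present block):
 *
 *	cat /sys/devices/system/memory/memory42/removable
 *
 * prints 1 if the block is likely to be hot-removable and 0 otherwise.
 */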

/*
 * online, offline, going offline, etc.
 */
static ssize_t show_mem_state(struct sys_device *dev,
			struct sysdev_attribute *attr, char *buf)
{
	struct memory_block *mem =
		container_of(dev, struct memory_block, sysdev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
				mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int i;
	unsigned long psection;
	unsigned long start_pfn, start_paddr;
	struct page *first_page;
	int ret;
	int old_state = mem->state;

	psection = mem->phys_index;
	first_page = pfn_to_page(psection << PFN_SECTION_SHIFT);

	/*
	 * The probe routines leave the pages reserved, just
	 * as the bootmem code does. Make sure they're still
	 * that way.
	 */
	if (action == MEM_ONLINE) {
		for (i = 0; i < PAGES_PER_SECTION; i++) {
			if (PageReserved(first_page+i))
				continue;

			printk(KERN_WARNING "section number %ld page number %d "
				"not reserved, was it already online?\n",
				psection, i);
			return -EBUSY;
		}
	}

	switch (action) {
	case MEM_ONLINE:
		start_pfn = page_to_pfn(first_page);
		ret = online_pages(start_pfn, PAGES_PER_SECTION);
		break;
	case MEM_OFFLINE:
		mem->state = MEM_GOING_OFFLINE;
		start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
		ret = remove_memory(start_paddr,
				PAGES_PER_SECTION << PAGE_SHIFT);
		if (ret) {
			mem->state = old_state;
			break;
		}
		break;
	default:
		WARN(1, KERN_WARNING "%s(%p, %ld) unknown action: %ld\n",
			__func__, mem, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;
	mutex_lock(&mem->state_mutex);

	if (mem->state != from_state_req) {
		ret = -EINVAL;
		goto out;
	}

	ret = memory_block_action(mem, to_state);
	if (!ret)
		mem->state = to_state;

out:
	mutex_unlock(&mem->state_mutex);
	return ret;
}

static ssize_t
store_mem_state(struct sys_device *dev,
		struct sysdev_attribute *attr, const char *buf, size_t count)
{
	struct memory_block *mem;
	unsigned int phys_section_nr;
	int ret = -EINVAL;

	mem = container_of(dev, struct memory_block, sysdev);
	phys_section_nr = mem->phys_index;

	if (!present_section_nr(phys_section_nr))
		goto out;

	if (!strncmp(buf, "online", min((int)count, 6)))
		ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	else if (!strncmp(buf, "offline", min((int)count, 7)))
		ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
out:
	if (ret)
		return ret;
	return count;
}

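/*
 * Example (illustrative; "memory42" stands in for any present block):
 *
 *	echo offline > /sys/devices/system/memory/memory42/state
 *	echo online  > /sys/devices/system/memory/memory42/state
 *
 * A write only succeeds when the block is currently in the opposite state.
 */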
/*
 * phys_device is a bad name for this. What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or FRU (field-replaceable unit):
 * i.e., do these ranges belong to the same physical device,
 * such that if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t show_phys_device(struct sys_device *dev,
			struct sysdev_attribute *attr, char *buf)
{
	struct memory_block *mem =
		container_of(dev, struct memory_block, sysdev);
	return sprintf(buf, "%d\n", mem->phys_device);
}

static SYSDEV_ATTR(phys_index, 0444, show_mem_phys_index, NULL);
static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);

#define mem_create_simple_file(mem, attr_name) \
	sysdev_create_file(&mem->sysdev, &attr_##attr_name)
#define mem_remove_simple_file(mem, attr_name) \
	sysdev_remove_file(&mem->sysdev, &attr_##attr_name)

/*
 * Block size attribute stuff
 */
static ssize_t
print_block_size(struct class *class, char *buf)
{
	return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
}

static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);

static int block_size_init(void)
{
	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
				&class_attr_block_size_bytes.attr);
}
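/*
 * Example: the block (section) size is exported read-only in hex; on a
 * configuration with 128MB sections this reads back as (value illustrative):
 *
 *	$ cat /sys/devices/system/memory/block_size_bytes
 *	8000000
 *
 * i.e. simply PAGES_PER_SECTION * PAGE_SIZE.
 */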

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
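/*
 * Example (illustrative address; only on architectures that select
 * CONFIG_ARCH_MEMORY_PROBE):
 *
 *	echo 0x40000000 > /sys/devices/system/memory/probe
 *
 * asks the kernel to add one memory section starting at that physical
 * address.
 */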
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
memory_probe_store(struct class *class, const char *buf, size_t count)
{
	u64 phys_addr;
	int nid;
	int ret;

	phys_addr = simple_strtoull(buf, NULL, 0);

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);

	if (ret)
		count = ret;

	return count;
}
static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);

static int memory_probe_init(void)
{
	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
				&class_attr_probe.attr);
}
#else
static inline int memory_probe_init(void)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct class *class, const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (strict_strtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t
store_hard_offline_page(struct class *class, const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (strict_strtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = __memory_failure(pfn, 0, 0);
	return ret ? ret : count;
}

static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
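/*
 * Example (illustrative address): both files take a physical address in
 * bytes, which is converted to a pfn above, e.g.
 *
 *	echo 0x12345000 > /sys/devices/system/memory/soft_offline_page
 *
 * soft_offline_page tries to migrate the page away without killing
 * anything; hard_offline_page handles the page as if a memory error had
 * been reported there.
 */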

static __init int memory_fail_init(void)
{
	int err;

	err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
				&class_attr_soft_offline_page.attr);
	if (!err)
		err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
				&class_attr_hard_offline_page.attr);
	return err;
}
#else
static inline int memory_fail_init(void)
{
	return 0;
}
#endif

/*
 * Note that phys_device is optional. It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */

static int add_memory_block(int nid, struct mem_section *section,
			unsigned long state, int phys_device,
			enum mem_add_context context)
{
	struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	int ret = 0;

	if (!mem)
		return -ENOMEM;

	mem->phys_index = __section_nr(section);
	mem->state = state;
	mutex_init(&mem->state_mutex);
	mem->phys_device = phys_device;

	ret = register_memory(mem, section);
	if (!ret)
		ret = mem_create_simple_file(mem, phys_index);
	if (!ret)
		ret = mem_create_simple_file(mem, state);
	if (!ret)
		ret = mem_create_simple_file(mem, phys_device);
	if (!ret)
		ret = mem_create_simple_file(mem, removable);
	if (!ret) {
		if (context == HOTPLUG)
			ret = register_mem_sect_under_node(mem, nid);
	}

	return ret;
}

/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all sysdev classes.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	struct kobject *kobj;
	struct sys_device *sysdev;
	struct memory_block *mem;
	char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];

	/*
	 * This only works because we know that section == sysdev->id
	 * slightly redundant with sysdev_register()
	 */
	sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, __section_nr(section));

	kobj = kset_find_obj(&memory_sysdev_class.kset, name);
	if (!kobj)
		return NULL;

	sysdev = container_of(kobj, struct sys_device, kobj);
	mem = container_of(sysdev, struct memory_block, sysdev);

	return mem;
}

int remove_memory_block(unsigned long node_id, struct mem_section *section,
		int phys_device)
{
	struct memory_block *mem;

	mem = find_memory_block(section);
	unregister_mem_sect_under_nodes(mem);
	mem_remove_simple_file(mem, phys_index);
	mem_remove_simple_file(mem, state);
	mem_remove_simple_file(mem, phys_device);
	mem_remove_simple_file(mem, removable);
	unregister_memory(mem, section);

	return 0;
}

/*
 * need an interface for the VM to add new memory regions,
 * but without onlining it.
 */
int register_new_memory(int nid, struct mem_section *section)
{
	return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG);
}

int unregister_memory_section(struct mem_section *section)
{
	if (!present_section(section))
		return -EINVAL;

	return remove_memory_block(0, section, 0);
}

/*
 * Initialize the sysfs support for memory devices...
 */
int __init memory_dev_init(void)
{
	unsigned int i;
	int ret;
	int err;

	memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
	ret = sysdev_class_register(&memory_sysdev_class);
	if (ret)
		goto out;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	for (i = 0; i < NR_MEM_SECTIONS; i++) {
		if (!present_section_nr(i))
			continue;
		err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE,
					0, BOOT);
		if (!ret)
			ret = err;
	}

	err = memory_probe_init();
	if (!ret)
		ret = err;
	err = memory_fail_init();
	if (!ret)
		ret = err;
	err = block_size_init();
	if (!ret)
		ret = err;
out:
	if (ret)
		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
	return ret;
}