/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback points to the current page-onlining function.
 * Initially it is generic_online_page().  If required, it can be
 * changed by calling set_online_page_callback() to register a new
 * callback and restore_online_page_callback() to restore the generic
 * one.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

DEFINE_MUTEX(mem_hotplug_mutex);

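/*
 * lock_memory_hotplug() serializes all hotplug operations behind
 * mem_hotplug_mutex, and also takes the system sleep lock so that
 * memory cannot be onlined or offlined while hibernation is running.
 */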
void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);

	/* for exclusive hibernation if CONFIG_HIBERNATION=y */
	lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
	unlock_system_sleep();
	mutex_unlock(&mem_hotplug_mutex);
}

/* add this memory to the iomem resource tree */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
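/*
 * With classic sparsemem, the memmap and usemap of a section may live
 * in bootmem pages.  The helpers below tag such pages (encoding the
 * type in page->lru.next and the info in page_private()) and raise
 * their refcounts, so put_page_bootmem() can hand them back to the
 * buddy allocator once the last reference is dropped.
 */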
static void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

/*
 * The reference to __meminit __free_pages_bootmem is valid,
 * so use __ref to tell modpost not to generate a warning.
 */
void __ref put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		__free_pages_bootmem(page, 0);
	}
}

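/*
 * Register one section's metadata: take bootmem references on every
 * page backing its memmap and its pageblock usemap, so this metadata
 * is not freed while the section is in use.
 */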
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

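/*
 * Register everything that describes a node: the pglist_data itself,
 * any zone wait tables, and the metadata of every section the node
 * spans.
 */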
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

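/*
 * Grow the zone and pgdat spans to cover a newly added pfn range.  The
 * zone span is updated under zone_span_writelock(); for the pgdat span
 * the caller is expected to hold the pgdat resize lock, as __add_zone()
 * below does.
 */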
static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

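/*
 * Hook one new section into a zone: initialize the zone if it was
 * empty, grow the zone and node spans, and initialize the section's
 * struct pages via memmap_init_zone().
 */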
static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

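/*
 * Add a single memory section: allocate its memmap, hook it into the
 * zone, and register it with the memory sysfs layer.  Returns -EEXIST
 * if the section already contains valid pages.
 */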
static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	/*
	 * XXX: Freeing the memmap with vmemmap is not implemented yet.
	 * This should be removed later.
	 */
	return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}
#endif

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
		      unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* when initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be a multiple of the section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		release_mem_region(pfn << PAGE_SHIFT,
				   PAGES_PER_SECTION << PAGE_SHIFT);
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

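/*
 * Allow a driver (a balloon driver, for example) to override how newly
 * onlined pages are handed to the allocator.  Registration succeeds
 * only while the generic callback is in place, so two users cannot
 * silently override each other.
 */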
int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	if (pfn >= num_physpages)
		num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	totalram_pages++;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

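/*
 * walk_system_ram_range() callback: online every page in the range
 * through the registered callback and accumulate the count in *arg.
 */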
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

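/*
 * Online a pfn range: notify MEM_GOING_ONLINE listeners, hand the pages
 * to the allocator, rebuild the zonelists if the zone was previously
 * empty, recompute the watermarks, and start kswapd on the node.
 */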
int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	lock_memory_hotplug();
	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	/*
	 * If this zone is not populated, it is not in the zonelist yet,
	 * which means the page allocator ignores it.  The zonelist must
	 * therefore be rebuilt after onlining.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
		       (unsigned long long) pfn << PAGE_SHIFT,
		       (((unsigned long long) pfn + nr_pages)
			    << PAGE_SHIFT) - 1);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}

	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	if (need_zonelists_rebuild)
		build_all_zonelists(NULL, zone);
	else
		zone_pcp_update(zone);

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	unlock_memory_hotplug();

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node we allocated has no zone fallback lists.  To avoid
	 * accessing a not-initialized zonelist, build one here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}

/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	lock_memory_hotplug();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

out:
	unlock_memory_hotplug();
	return ret;
}

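/*
 * Hot-add a physical memory range: claim it as "System RAM" in the
 * iomem resource tree, allocate a pgdat if the node was offline, let
 * the architecture create the mappings, and record the range in the
 * firmware memmap (which, among others, kexec relies on).
 */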
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	lock_memory_hotplug();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs files of the new node can't be created,
		 * CPUs on the node can't be hot-added.  There is no
		 * rollback path at this point, so catch the failure
		 * with BUG_ON(), reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

out:
	unlock_memory_hotplug();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful: we don't hold locks, so page_order can change */
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists: scan pfns
 * from start to end and return the first one that maps an LRU page.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
	/* This should be improooooved!! */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
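/*
 * Migrate up to NR_OFFLINE_AT_ONCE_PAGES LRU pages out of the range
 * being offlined.  Free pages are skipped; a page that is pinned but
 * not on the LRU aborts the whole operation with -EBUSY.
 */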
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can deal only with pages
		 * on the LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
			put_page(page);
			/*
			 * Because we don't hold the zone lock across the
			 * whole scan, check the page count again here.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_lru_pages(&source);
			goto out;
		}
		/* this function returns the number of failed pages */
		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
				    true, MIGRATE_SYNC);
		if (ret)
			putback_lru_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			  void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
			      offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource,
 * are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
				    check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

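/*
 * Offline a pfn range: isolate its pageblocks, migrate the LRU pages
 * away (draining LRU pagevecs and pcp lists and retrying until the
 * range is empty or the timeout expires), then pull the isolated pages
 * off the free lists and fix up the zone and node accounting.
 */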
static int __ref offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier and more readable; for now we
	 * assume the whole range lies within a single zone.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_memory_hotplug();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have pages on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs; this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages; this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * Ok, all of our target is isolated.
	 * We cannot do a rollback at this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migratetype MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	init_per_zone_wmark_min();

	if (!node_present_pages(node)) {
		node_clear_state(node, N_HIGH_MEMORY);
		kswapd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_memory_hotplug();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
	       (unsigned long long) start_pfn << PAGE_SHIFT,
	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
	unlock_memory_hotplug();
	return ret;
}

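/*
 * Offline the given range with a 120 second timeout for the migration
 * loop.  This only offlines the pages; tearing down the section
 * mappings and sysfs entries is done separately via __remove_pages().
 */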
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;

	start_pfn = PFN_DOWN(start);
	end_pfn = start_pfn + PFN_DOWN(size);
	return offline_pages(start_pfn, end_pfn, 120 * HZ);
}
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);