/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function.  Initially it is generic_online_page().  If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);

	/* for exclusive hibernation if CONFIG_HIBERNATION=y */
	lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
	unlock_system_sleep();
	mutex_unlock(&mem_hotplug_mutex);
}


/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

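/*
 * Illustrative sketch, not part of the original file: add_memory() below
 * pairs these two helpers -- the range is claimed in the iomem tree up
 * front and handed back on any rollback path:
 *
 *	res = register_memory_resource(start, size);
 *	if (!res)
 *		return -EEXIST;
 *	...
 *	if (arch_add_memory(nid, start, size) < 0)
 *		release_memory_resource(res);
 */
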
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

/* reference to __meminit __free_pages_bootmem is valid
 * so use __ref to tell modpost not to generate a warning */
void __ref put_page_bootmem(struct page *page)
{
	unsigned long type;
	static DEFINE_MUTEX(ppb_lock);

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);

		/*
		 * Please refer to comment for __free_pages_bootmem()
		 * for why we serialize here.
		 */
		mutex_lock(&ppb_lock);
		__free_pages_bootmem(page, 0);
		mutex_unlock(&ppb_lock);
		totalram_pages++;
	}

}

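/*
 * Illustrative sketch, not part of the original file: bootmem info pages
 * are refcounted through the two helpers above.  Registration takes one
 * reference per page; teardown drops it, and put_page_bootmem() hands the
 * page back to the buddy allocator once _count falls to 1:
 *
 *	get_page_bootmem(section_nr, page, SECTION_INFO);
 *	...
 *	put_page_bootmem(page);
 */
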
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes -
		 * on node0 as well as nodeN.  To avoid registering a pfn
		 * against multiple nodes we check that this pfn does not
		 * already reside in some other node.
		 */
		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void resize_zone(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	zone_span_writelock(zone);

	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		/*
		 * Keep this consistent with free_area_init_core():
		 * if spanned_pages == 0, keep zone_start_pfn == 0.
		 */
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}

	zone_span_writeunlock(zone);
}

static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	enum zone_type zid = zone_idx(zone);
	int nid = zone->zone_pgdat->node_id;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

/* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
 * alloc_bootmem_node_nopanic() */
static int __ref ensure_zone_is_initialized(struct zone *zone,
			unsigned long start_pfn, unsigned long num_pages)
{
	if (!zone_is_initialized(zone))
		return init_currently_empty_zone(zone, start_pfn, num_pages,
						 MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
	unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z1_start_pfn;

	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are higher than @z2 */
	if (end_pfn > zone_end_pfn(z2))
		goto out_fail;
	/* the moved-out part must be at the leftmost of @z2 */
	if (start_pfn > z2->zone_start_pfn)
		goto out_fail;
	/* must include/overlap */
	if (end_pfn <= z2->zone_start_pfn)
		goto out_fail;

	/* use start_pfn for z1's start_pfn if z1 is empty */
	if (z1->spanned_pages)
		z1_start_pfn = z1->zone_start_pfn;
	else
		z1_start_pfn = start_pfn;

	resize_zone(z1, z1_start_pfn, end_pfn);
	resize_zone(z2, end_pfn, zone_end_pfn(z2));

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z1, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
	unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z2_end_pfn;

	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are lower than @z1 */
	if (z1->zone_start_pfn > start_pfn)
		goto out_fail;
	/* the moved-out part must be at the rightmost of @z1 */
	if (zone_end_pfn(z1) > end_pfn)
		goto out_fail;
	/* must include/overlap */
	if (start_pfn >= zone_end_pfn(z1))
		goto out_fail;

	/* use end_pfn for z2's end_pfn if z2 is empty */
	if (z2->spanned_pages)
		z2_end_pfn = zone_end_pfn(z2);
	else
		z2_end_pfn = end_pfn;

	resize_zone(z1, z1->zone_start_pfn, start_pfn);
	resize_zone(z2, start_pfn, z2_end_pfn);

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z2, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

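/*
 * Illustrative sketch, not part of the original file: online_pages() below
 * uses the two helpers above to rezone a range before onlining it, e.g. to
 * online part of ZONE_MOVABLE as kernel memory:
 *
 *	if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE)
 *		move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages);
 */
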
static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;
	int ret;

	zone_type = zone - pgdat->node_zones;
	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
	if (ret)
		return ret;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
			unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* when initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

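/*
 * Illustrative sketch, not part of the original file: an arch's
 * arch_add_memory() typically picks a target zone and hands the
 * section-aligned pfn range to __add_pages(), roughly:
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *	unsigned long start_pfn = start >> PAGE_SHIFT;
 *	unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *	return __add_pages(nid, zone, start_pfn, nr_pages);
 *
 * (The zone choice here is an assumption for illustration; real arches
 * differ.)
 */
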
#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the zone, it only creates a hole in the zone.  So in this case,
	 * we need not change the zone.  But perhaps the zone contains only
	 * holes, so check whether the zone has any valid section left.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* If the section is the current section, continue the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we
		 * need to shrink pgdat->node_start_pfn and
		 * pgdat->node_spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the pgdat.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we need
		 * to shrink pgdat->node_spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the pgdat.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, it only creates a hole in the pgdat.  So in this
	 * case, we need not change the pgdat.  But perhaps the pgdat contains
	 * only holes, so check whether it has any valid section left.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* If the section is the current section, continue the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms);
	return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	int sections_to_remove;
	resource_size_t start, size;
	int ret = 0;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	start = phys_start_pfn << PAGE_SHIFT;
	size = nr_pages * PAGE_SIZE;
	ret = release_mem_region_adjustable(&iomem_resource, start, size);
	if (ret)
		pr_warn("Unable to release resource <%016llx-%016llx> (%d)\n",
			start, start + size - 1, ret);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

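/*
 * Illustrative sketch, not part of the original file: a ballooning driver
 * can divert newly onlined pages into its own pool instead of the buddy
 * allocator.  Here my_online_page() and my_pool_add() are hypothetical:
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		my_pool_add(page);
 *	}
 *
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */
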
void __online_page_set_limits(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	if (pfn >= num_physpages)
		num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	totalram_pages++;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
 * normal memory.
 */
static bool can_online_high_movable(struct zone *zone)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which states of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is online.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * If the node doesn't have memory before onlining, we will need to
	 * set the node to node_states[N_MEMORY] after the memory
	 * is online.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}


int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	lock_memory_hotplug();
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));

	if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) &&
	    !can_online_high_movable(zone)) {
		unlock_memory_hotplug();
		return -1;
	}

	if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
			unlock_memory_hotplug();
			return -1;
		}
	}
	if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) {
		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) {
			unlock_memory_hotplug();
			return -1;
		}
	}

	/* The previous code may have changed the zone of the pfn range */
	zone = page_zone(pfn_to_page(pfn));

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	nid = page_to_nid(pfn_to_page(pfn));

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}
	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		build_all_zonelists(NULL, zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
		       (unsigned long long) pfn << PAGE_SHIFT,
		       (((unsigned long long) pfn + nr_pages)
			<< PAGE_SHIFT) - 1);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}

	zone->managed_pages += onlined_pages;
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	if (onlined_pages) {
		node_states_set_node(zone_to_nid(zone), &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL, NULL);
		else
			zone_pcp_update(zone);
	}

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages)
		kswapd_run(zone_to_nid(zone));

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	unlock_memory_hotplug();

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	}

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node we allocated has no zone fallback lists.  To avoid
	 * accessing a not-initialized zonelist, build one here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}


/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	lock_memory_hotplug();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

out:
	unlock_memory_hotplug();
	return ret;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	struct resource *res;
	int ret;

	lock_memory_hotplug();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}
	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_node) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created,
		 * CPUs on the node can't be hot-added.  There is no
		 * rollback path now, so check it with BUG_ON() to catch
		 * it, reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	release_memory_resource(res);

out:
	unlock_memory_hotplug();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

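/*
 * Illustrative sketch, not part of the original file: a hot-add driver
 * (e.g. ACPI memory hotplug) calls add_memory() with the node id and the
 * physical range reported by firmware; the field names below are
 * assumptions for illustration:
 *
 *	ret = add_memory(nid, info->start_addr, info->length);
 *	if (ret)
 *		pr_err("add_memory failed\n");
 *
 * The new pages only become usable after they are onlined, typically via
 * the memory sysfs interface, which ends up in online_pages() above.
 */
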
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in a range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and return the first LRU page found.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

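/*
 * Illustrative sketch, not part of the original file: __offline_pages()
 * below drains a range by repeatedly finding the next LRU page and
 * migrating everything from there to the end of the range:
 *
 *	pfn = scan_lru_pages(start_pfn, end_pfn);
 *	if (pfn)
 *		ret = do_migrate_range(pfn, end_pfn);
 */
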
#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
			put_page(page);
			/*
			 * Because we don't hold the big zone->lock here,
			 * we should check the page count again.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_lru_pages(&source);
			goto out;
		}

		/*
		 * alloc_migrate_target should be improved!
		 * migrate_pages returns the number of failed pages.
		 */
		ret = migrate_pages(&source, alloc_migrate_target, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_lru_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
 * normal memory.
 */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	if (present_pages > nr_pages)
		return true;

	present_pages = 0;
	for (; zt <= ZONE_MOVABLE; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	/*
	 * we can't offline the last normal memory until all
	 * higher memory is offlined.
	 */
	return present_pages == 0;
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which states of node_states will be changed when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * so set zone_last to ZONE_NORMAL.
	 *
	 * If we have neither HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is in a zone of 0...zone_last,
	 * and it is the last present memory there, 0...zone_last will
	 * become empty after the offline, so we can determine that we
	 * will need to clear the node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * so set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * so set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_MEMORY] will be changed.
	 * If we try to offline the last present @nr_pages from the node,
	 * we can determine that we will need to clear the node from
	 * node_states[N_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}

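/*
 * Illustrative example (hypothetical, CONFIG_MOVABLE_NODE && !CONFIG_HIGHMEM):
 * offlining a node's whole ZONE_MOVABLE while ZONE_NORMAL pages remain
 * leaves all three status_change_nid* fields at -1; offlining the last
 * remaining ZONE_NORMAL range afterwards sets them all to the node id,
 * and node_states_clear_node() below then drops the node from the
 * corresponding node_states[] masks.
 */
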
static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}

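/*
 * Offline the pageblock-aligned range [start_pfn, end_pfn), which must
 * lie within a single zone; give up with -EAGAIN if the pages cannot be
 * drained and migrated away before @timeout jiffies elapse.
 */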
static int __ref __offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * Requiring the whole range to lie within one zone makes hotplug
	 * much easier (and the code more readable); we assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_memory_hotplug();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	ret = -EINVAL;
	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
		goto out;

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* we have pages on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
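	/*
	 * scan_lru_pages() found no LRU pages left in the range: drain
	 * once more and then re-check that every page is isolated.
	 */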
	/* drain all zones' lru pagevecs; this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages; this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * Ok, all of our target pages are isolated.
	 * We cannot roll back past this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pageblock flags and set the migratetype back to MOVABLE */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/* removal success */
	zone->managed_pages -= offlined_pages;
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0)
		kswapd_stop(node);

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_memory_hotplug();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
	       (unsigned long long) start_pfn << PAGE_SHIFT,
	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push the isolated pages back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
	unlock_memory_hotplug();
	return ret;
}

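/*
 * Usage sketch (illustrative, not a complete caller): the memory sysfs
 * "state" attribute is expected to offline one memory block at a time,
 * roughly as
 *
 *	offline_pages(section_nr_to_pfn(mem->start_section_nr),
 *		      PAGES_PER_SECTION * sections_per_block);
 *
 * with the 120 * HZ timeout below bounding how long a single attempt
 * may keep retrying migration.
 */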
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/**
 * walk_memory_range - walk through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to @func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in the range
 * [start_pfn, end_pfn) and calls @func on each of them.
 *
 * Returns the first non-zero value returned by @func, or 0 if @func
 * succeeded for every section.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *))
{
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* same memblock? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}

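/*
 * Example: remove_memory() below uses this walker as
 *
 *	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
 *			  is_memblock_offlined_cb);
 *
 * to verify that every memory block in the range has been offlined.
 */
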
#ifdef CONFIG_MEMORY_HOTREMOVE
static int is_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1)) - 1;
		pr_warn("removing memory failed, because memory [%pa-%pa] is still online\n",
			&beginpa, &endpa);
	}

	return ret;
}

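/*
 * The helpers below run as a stop_machine() callback from
 * try_offline_node(), so they must not sleep or take sleeping locks.
 */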
static int check_cpu_on_node(void *data)
{
	struct pglist_data *pgdat = data;
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * A cpu on this node has not been removed yet, so
			 * we can't offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static void unmap_cpu_on_node(void *data)
{
#ifdef CONFIG_ACPI_NUMA
	struct pglist_data *pgdat = data;
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_to_node(cpu) == pgdat->node_id)
			numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(void *data)
{
	int ret = check_cpu_on_node(data);

	if (ret)
		return ret;

	/*
	 * The node is about to be offlined when we get here, so it is
	 * safe to clear cpu_to_node() now.
	 */
	unmap_cpu_on_node(data);
	return 0;
}

/* offline the node if all memory sections of this node are removed */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;
	struct page *pgdat_page = virt_to_page(pgdat);
	int i;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * Some memory sections of this node have not been removed,
		 * so we can't offline the node now.
		 */
		return;
	}

	if (stop_machine(check_and_unmap_cpu_on_node, pgdat, NULL))
		return;

	/*
	 * All memory/cpus of this node have been removed; we can offline
	 * the node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);

	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
		/* node data is allocated from boot memory */
		return;

	/* free the wait_table in each zone */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		/*
		 * wait_table may be allocated from boot memory,
		 * so only free it here if it was allocated by vmalloc.
		 */
		if (is_vmalloc_addr(zone->wait_table))
			vfree(zone->wait_table);
	}

	/*
	 * Since there is no way to guarantee the address of pgdat/zone is not
	 * on the stack of any kernel thread or used by other kernel objects
	 * without reference counting or another synchronizing method, do not
	 * reset node_data and free pgdat here. Just reset it to 0 and reuse
	 * the memory when the node is online again.
	 */
	memset(pgdat, 0, sizeof(*pgdat));
}
EXPORT_SYMBOL(try_offline_node);

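/*
 * Caller sketch (illustrative assumption, not verified for every
 * platform): an ACPI memory-device eject path would offline the
 * affected memory blocks and then call
 *
 *	remove_memory(nid, info->start_addr, info->length);
 *
 * where info (a hypothetical name here) describes the hot-removed
 * physical range.
 */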
void __ref remove_memory(int nid, u64 start, u64 size)
{
	int ret;

	lock_memory_hotplug();

	/*
	 * All memory blocks must be offlined before removing memory.  Check
	 * whether all memory blocks in question are offline and trigger a BUG()
	 * if this is not the case.
	 */
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
				is_memblock_offlined_cb);
	if (ret) {
		unlock_memory_hotplug();
		BUG();
	}

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	arch_remove_memory(start, size);

	try_offline_node(nid);

	unlock_memory_hotplug();
}
#else
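/*
 * Stubs for CONFIG_MEMORY_HOTREMOVE=n, so that callers can be built
 * without #ifdefs: offlining is reported as unsupported and removal
 * is a no-op.
 */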
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}
void remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);