/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers that rely on the fully initialized page->flags (and others)
 * should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)					\
({								\
	struct page *___page = NULL;				\
	unsigned long ___nr = pfn_to_section_nr(pfn);		\
								\
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
		___page = pfn_to_page(pfn);			\
	___page;						\
})
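
/*
 * Example (illustrative sketch, not part of this header): a pfn walker
 * that skips pages which are not online. inspect_page() is a hypothetical
 * helper standing in for whatever the walker does per page.
 *
 *	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		inspect_page(page);
 *	}
 */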

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};
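
/*
 * For reference: these values back the per-memory-block "state" sysfs
 * interface (see drivers/base/memory.c), roughly:
 *
 *	echo offline        > /sys/devices/system/memory/memoryN/state
 *	echo online         > /sys/devices/system/memory/memoryN/state
 *	echo online_kernel  > /sys/devices/system/memory/memoryN/state
 *	echo online_movable > /sys/devices/system/memory/memoryN/state
 *
 * corresponding to MMOP_OFFLINE, MMOP_ONLINE_KEEP, MMOP_ONLINE_KERNEL
 * and MMOP_ONLINE_MOVABLE respectively.
 */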

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
 * and zone_span_writelock() both held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
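
/*
 * Example (sketch of the intended reader pattern, cf. the zone boundary
 * checks in mm/page_alloc.c), given a zone and a pfn:
 *
 *	unsigned seq;
 *	bool outside;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		outside = pfn < zone->zone_start_pfn ||
 *			  pfn >= zone->zone_start_pfn + zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */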
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);

typedef int (*online_page_callback_t)(struct page *page);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);
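
/*
 * Example (sketch): a balloon-style driver can intercept newly onlined
 * pages rather than letting them go straight to the page allocator, in
 * the spirit of drivers/hv/hv_balloon.c. my_online_page() is a
 * hypothetical callback built from the helpers declared above:
 *
 *	static int my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);
 *		return 0;
 *	}
 *
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */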

extern int try_online_node(int nid);
extern bool try_online_one_block(int nid);

extern bool memhp_auto_online;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
extern int arch_remove_memory(u64 start, u64 size,
		struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */
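
/*
 * Example (sketch): an architecture's arch_add_memory() typically maps
 * the new range first and then hands it to add_pages(); compare the x86
 * implementation in arch/x86/mm/init_64.c. The mapping step is elided.
 *
 *	int arch_add_memory(int nid, u64 start, u64 size,
 *			    struct vmem_altmap *altmap, bool want_memblock)
 *	{
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		... establish the direct mapping for [start, start + size) ...
 *		return add_pages(nid, start_pfn, nr_pages, altmap,
 *				 want_memblock);
 *	}
 */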

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is defined only for the error path of node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate
 * the pgdat.
 * XXX: kmalloc_node() cannot be used here to get the new node's memory,
 * because the pgdat for the new node is not itself allocated/initialized
 * yet. To use the new node's memory, more consideration will be necessary.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path in node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

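/*
 * Example (sketch): bootmem-info registration pins a page and tags it
 * with one of the bootmem types declared above, roughly as the
 * register_page_bootmem_info_section() path does:
 *
 *	get_page_bootmem(section_nr, page, SECTION_INFO);
 *
 * and the teardown side drops the reference with put_page_bootmem(page).
 */
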
void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
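
/*
 * Example (sketch): readers that need a stable set of online memory
 * bracket their work with get_online_mems()/put_online_mems(), while
 * the hotplug paths themselves serialize via mem_hotplug_begin()/done():
 *
 *	get_online_mems();
 *	... walk memory that must not be offlined concurrently ...
 *	put_online_mems();
 */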

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline bool try_online_one_block(int nid)
{
	return false;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
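
/*
 * Example (sketch): a writer resizing a node takes the pgdat lock around
 * the span update, pairing with the zone span seqlock described earlier:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	zone_span_writelock(zone);
 *	... update zone->zone_start_pfn / zone->spanned_pages ...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(pgdat, &flags);
 */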
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
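
/*
 * Example (sketch): the usual hot-remove flow first offlines the range,
 * then tears down its memmap and memory block devices. offline_pages()
 * returns 0 on success; callers such as the ACPI memory hotplug driver
 * run this sequence under the device hotplug locks:
 *
 *	if (!offline_pages(start_pfn, nr_pages))
 *		remove_memory(nid, start, size);
 */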

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline void remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
extern int sparse_add_one_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages);
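
/*
 * Example (sketch): a typical hot-add sequence as driven by e.g. the
 * ACPI memory-device driver. add_memory() creates the memmap and the
 * memory block devices; the blocks are then onlined via online_pages(),
 * either by userspace policy or automatically when memhp_auto_online
 * is set:
 *
 *	ret = add_memory(nid, start, size);
 *	if (ret)
 *		return ret;
 *	... userspace or auto-online brings the new blocks online ...
 */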
#endif /* __LINUX_MEMORY_HOTPLUG_H */