blob: 31c237a00c48e472b7c8e6c435fe66de9b0e232a [file] [log] [blame]
Dave Hansen208d54e2005-10-29 18:16:52 -07001#ifndef __LINUX_MEMORY_HOTPLUG_H
2#define __LINUX_MEMORY_HOTPLUG_H
3
4#include <linux/mmzone.h>
5#include <linux/spinlock.h>
Dave Hansen3947be12005-10-29 18:16:54 -07006#include <linux/notifier.h>
Dave Hansen208d54e2005-10-29 18:16:52 -07007
/* Forward declarations: the prototypes below only use these as pointers. */
struct page;
struct zone;
struct pglist_data;
struct mem_section;
#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * Types for free bootmem.
 * A page's smallest normal mapcount is -1, so these marker values sit
 * below that to stay distinct from any real mapcount.
 */
#define SECTION_INFO		(-1 - 1)
#define MIX_SECTION_INFO	(-1 - 2)
#define NODE_INFO		(-1 - 3)
/*
 * pgdat resizing functions
 */
/* Take pgdat->node_size_lock with IRQs disabled; saved state goes in *flags. */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
/* Drop pgdat->node_size_lock and restore the IRQ state saved in *flags. */
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
/* Initialise pgdat->node_size_lock. */
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
/*
 * Zone resizing functions
 *
 * zone->span_seqlock lets lockless readers detect a concurrent resize of
 * the zone's span: sample with zone_span_seqbegin(), read the span fields,
 * then retry while zone_span_seqretry() returns non-zero.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
/* Writer side: held while the zone's span is being changed. */
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* need some defines for these for archs that don't support it */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long pfn, unsigned long nr_pages);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
		       unsigned long nr_pages);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
			  unsigned long nr_pages);
Yasunori Gotobc02af92006-06-27 02:53:30 -070082
#ifdef CONFIG_NUMA
/* Arch hook: map a physical address being hot-added to a NUMA node id. */
extern int memory_add_physaddr_to_nid(u64 start);
#else
/* Without NUMA, everything belongs to node 0. */
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif
91
#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * For supporting node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(), then
 * node_data[nid] = kzalloc() works well.  But it depends on the
 * architecture, so arches may override these hooks.
 *
 * In general, generic_alloc_nodedata() is used.
 * Currently, arch_free_nodedata() is only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this is used to allocate a pgdat.
 * XXX: kmalloc_node() cannot work here, because the pgdat for the new
 * node is not allocated/initialized yet itself.  To use the new node's
 * own memory, more consideration would be necessary.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);		\
})
/*
 * This definition is just for the error path in node hot-add.
 * For node hot-remove, it would have to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
/* Publish the freshly allocated pgdat so NODE_DATA(nid) resolves to it. */
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
151
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* No bootmem-info registration is done when sparsemem uses vmemmap. */
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline void put_page_bootmem(struct page *page)
{
}
#else
extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page);
#endif
163
/* Global mutual exclusion for memory hotplug operations. */
void lock_memory_hotplug(void);
void unlock_memory_hotplug(void);
166
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

/* Without hotplug a zone's span never changes, so readers never retry. */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

/*
 * Warn (with a stack trace) that a hotplug entry point was reached in a
 * kernel built without CONFIG_MEMORY_HOTPLUG; returns -ENOSYS.
 */
static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline void lock_memory_hotplug(void) {}
static inline void unlock_memory_hotplug(void) {}

#endif /* ! CONFIG_MEMORY_HOTPLUG */
Andi Kleen9d99aaa2006-04-07 19:49:15 +0200202
#ifdef CONFIG_MEMORY_HOTREMOVE

extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);

#else
/* Stub: without hot-remove support, always reports 0 (not removable). */
static inline int is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
214
/* High-level hotplug entry points and sparsemem section helpers. */
extern int mem_online_node(int nid);
/* Hot-add the physical range [start, start + size) to node nid. */
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
							int nr_pages);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);

#endif /* __LINUX_MEMORY_HOTPLUG_H */