blob: d95f72e79b829bfe85a873f9494b532deef99bac [file] [log] [blame]
Dave Hansen208d54e2005-10-29 18:16:52 -07001#ifndef __LINUX_MEMORY_HOTPLUG_H
2#define __LINUX_MEMORY_HOTPLUG_H
3
4#include <linux/mmzone.h>
5#include <linux/spinlock.h>
Dave Hansen3947be12005-10-29 18:16:54 -07006#include <linux/notifier.h>
Dave Hansen208d54e2005-10-29 18:16:52 -07007
KAMEZAWA Hiroyuki78679302006-03-06 15:42:49 -08008struct page;
9struct zone;
10struct pglist_data;
Badari Pulavartyea01ea92008-04-28 02:12:01 -070011struct mem_section;
KAMEZAWA Hiroyuki78679302006-03-06 15:42:49 -080012
Dave Hansen208d54e2005-10-29 18:16:52 -070013#ifdef CONFIG_MEMORY_HOTPLUG
Yasunori Goto04753272008-04-28 02:13:31 -070014
/*
 * Types for free bootmem.
 * The normal smallest mapcount is -1. These are values smaller than that,
 * so they cannot collide with a real mapcount.
 */
Yasunori Gotoaf370fb2008-07-23 21:28:17 -070019#define SECTION_INFO (-1 - 1)
20#define MIX_SECTION_INFO (-1 - 2)
21#define NODE_INFO (-1 - 3)
Yasunori Goto04753272008-04-28 02:13:31 -070022
Dave Hansen208d54e2005-10-29 18:16:52 -070023/*
24 * pgdat resizing functions
25 */
26static inline
27void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
28{
29 spin_lock_irqsave(&pgdat->node_size_lock, *flags);
30}
31static inline
32void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
33{
Dave Hansenbdc8cb92005-10-29 18:16:53 -070034 spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
Dave Hansen208d54e2005-10-29 18:16:52 -070035}
36static inline
37void pgdat_resize_init(struct pglist_data *pgdat)
38{
39 spin_lock_init(&pgdat->node_size_lock);
40}
Dave Hansenbdc8cb92005-10-29 18:16:53 -070041/*
42 * Zone resizing functions
43 */
44static inline unsigned zone_span_seqbegin(struct zone *zone)
45{
46 return read_seqbegin(&zone->span_seqlock);
47}
48static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
49{
50 return read_seqretry(&zone->span_seqlock, iv);
51}
52static inline void zone_span_writelock(struct zone *zone)
53{
54 write_seqlock(&zone->span_seqlock);
55}
56static inline void zone_span_writeunlock(struct zone *zone)
57{
58 write_sequnlock(&zone->span_seqlock);
59}
60static inline void zone_seqlock_init(struct zone *zone)
61{
62 seqlock_init(&zone->span_seqlock);
63}
Dave Hansen3947be12005-10-29 18:16:54 -070064extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
65extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
66extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
67/* need some defines for these for archs that don't support it */
68extern void online_page(struct page *page);
69/* VM interface that may be used by firmware interface */
Dave Hansen3947be12005-10-29 18:16:54 -070070extern int online_pages(unsigned long, unsigned long);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -070071extern void __offline_isolated_pages(unsigned long, unsigned long);
KAMEZAWA Hiroyuki48e94192007-10-16 01:26:14 -070072extern int offline_pages(unsigned long, unsigned long, unsigned long);
73
Dave Hansen3947be12005-10-29 18:16:54 -070074/* reasonably generic interface to expand the physical pages in a zone */
Gary Hadec04fc582009-01-06 14:39:14 -080075extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
Dave Hansen3947be12005-10-29 18:16:54 -070076 unsigned long nr_pages);
Badari Pulavartyea01ea92008-04-28 02:12:01 -070077extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
78 unsigned long nr_pages);
Yasunori Gotobc02af92006-06-27 02:53:30 -070079
80#ifdef CONFIG_NUMA
81extern int memory_add_physaddr_to_nid(u64 start);
82#else
83static inline int memory_add_physaddr_to_nid(u64 start)
84{
85 return 0;
86}
87#endif
88
Yasunori Goto306d6cb2006-06-27 02:53:32 -070089#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * For supporting node-hotadd, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well. But it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * Now, arch_free_nodedata() is just defined for the error path of node_hot_add.
 */
Yasunori Gotodd0932d2006-06-27 02:53:40 -0700100extern pg_data_t *arch_alloc_nodedata(int nid);
101extern void arch_free_nodedata(pg_data_t *pgdat);
Yasunori Goto70490272006-06-27 02:53:39 -0700102extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
Yasunori Goto306d6cb2006-06-27 02:53:32 -0700103
104#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
105
106#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
107#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
108
109#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate a pgdat.
 * XXX: kmalloc_node() cannot work to get the new node's memory at this time,
 * because the pgdat for the new node is not allocated/initialized yet itself.
 * To use the new node's memory, more consideration will be necessary.
 */
116#define generic_alloc_nodedata(nid) \
117({ \
118 kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
119})
/*
 * This definition is just for the error path in node hotadd.
 * For node hotremove, we have to replace this.
 */
124#define generic_free_nodedata(pgdat) kfree(pgdat)
125
Yasunori Goto10ad4002006-06-27 02:53:33 -0700126extern pg_data_t *node_data[];
127static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
128{
129 node_data[nid] = pgdat;
130}
131
Yasunori Goto306d6cb2006-06-27 02:53:32 -0700132#else /* !CONFIG_NUMA */
133
134/* never called */
135static inline pg_data_t *generic_alloc_nodedata(int nid)
136{
137 BUG();
138 return NULL;
139}
140static inline void generic_free_nodedata(pg_data_t *pgdat)
141{
142}
Yasunori Goto10ad4002006-06-27 02:53:33 -0700143static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
144{
145}
Yasunori Goto306d6cb2006-06-27 02:53:32 -0700146#endif /* CONFIG_NUMA */
147#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
148
Yasunori Goto04753272008-04-28 02:13:31 -0700149#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* With SPARSEMEM_VMEMMAP the bootmem-info bookkeeping is not used. */
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline void put_page_bootmem(struct page *page)
{
}
156#else
157extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
158extern void put_page_bootmem(struct page *page);
159#endif
160
Dave Hansen208d54e2005-10-29 18:16:52 -0700161#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

/* No resizing can occur, so span reads never need to retry. */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
Dave Hansen3947be12005-10-29 18:16:54 -0700180
181static inline int mhp_notimplemented(const char *func)
182{
183 printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
184 dump_stack();
185 return -ENOSYS;
186}
187
/* No bootmem-info registration when memory hotplug is compiled out. */
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
191
Dave Hansenbdc8cb92005-10-29 18:16:53 -0700192#endif /* ! CONFIG_MEMORY_HOTPLUG */
Andi Kleen9d99aaa2006-04-07 19:49:15 +0200193
/*
 * Walk through all memory which is registered as a resource.
 * func is called as func(start_pfn, nr_pages, private_arg_pointer).
 */
198extern int walk_memory_resource(unsigned long start_pfn,
199 unsigned long nr_pages, void *arg,
200 int (*func)(unsigned long, unsigned long, void *));
201
Badari Pulavarty5c755e92008-07-23 21:28:19 -0700202#ifdef CONFIG_MEMORY_HOTREMOVE
203
204extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
205
206#else
static inline int is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	/* Without CONFIG_MEMORY_HOTREMOVE nothing is ever removable. */
	return 0;
}
212#endif /* CONFIG_MEMORY_HOTREMOVE */
213
Yasunori Gotobc02af92006-06-27 02:53:30 -0700214extern int add_memory(int nid, u64 start, u64 size);
215extern int arch_add_memory(int nid, u64 start, u64 size);
Andi Kleen9d99aaa2006-04-07 19:49:15 +0200216extern int remove_memory(u64 start, u64 size);
Keith Manntheyf28c5ed2006-09-30 23:27:04 -0700217extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
218 int nr_pages);
Badari Pulavartyea01ea92008-04-28 02:12:01 -0700219extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
Yasunori Goto04753272008-04-28 02:13:31 -0700220extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
221 unsigned long pnum);
Andi Kleen9d99aaa2006-04-07 19:49:15 +0200222
Dave Hansen208d54e2005-10-29 18:16:52 -0700223#endif /* __LINUX_MEMORY_HOTPLUG_H */