blob: 73e358612eaffa3aa9ef27026d542a382b3a2afd [file] [log] [blame]
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

/* Forward declarations: this header only needs pointers to these types. */
struct page;
struct zone;
struct pglist_data;
struct mem_section;

#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * Magic number for free bootmem.
 * The normal smallest mapcount is -1; these are values smaller than it,
 * so they cannot be mistaken for a real mapcount.
 * (Presumably stored in the page's mapcount field to tag bootmem pages
 * by what they describe -- section, node, or mixed data; see
 * put_page_bootmem()/register_page_bootmem_info_node() users.)
 */
#define SECTION_INFO 0xfffffffe
#define MIX_INFO 0xfffffffd
#define NODE_INFO 0xfffffffc
/*
 * pgdat resizing functions
 */
/*
 * pgdat_resize_lock - take pgdat->node_size_lock, disabling local IRQs.
 * @pgdat: node whose size fields are about to be read/changed
 * @flags: caller-provided storage for the saved IRQ flags; must be passed
 *         unchanged to pgdat_resize_unlock()
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
/*
 * pgdat_resize_unlock - release pgdat->node_size_lock and restore the IRQ
 * state saved by the matching pgdat_resize_lock() call.
 */
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
/*
 * pgdat_resize_init - initialize the node-resize spinlock; call once while
 * setting up a pgdat, before any resize lock/unlock pair may run.
 */
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
/*
 * Zone resizing functions
 */
/*
 * zone_span_seqbegin - begin a seqlock read-side section over the zone's
 * span (start_pfn/spanned_pages).  Returns the sequence value to pass to
 * zone_span_seqretry(); retry the read if it reports a concurrent write.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
/*
 * zone_span_seqretry - end a read-side section started by
 * zone_span_seqbegin().  Returns non-zero if a writer intervened and the
 * caller must retry its reads of the zone span.
 */
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
/*
 * zone_span_writelock - take the write side of the zone-span seqlock;
 * hold while modifying the zone's span fields.
 */
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
/*
 * zone_span_writeunlock - release the write side of the zone-span seqlock,
 * bumping the sequence so concurrent readers know to retry.
 */
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
/*
 * zone_seqlock_init - initialize the zone-span seqlock; call once during
 * zone setup before any read/write sections may run.
 */
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* need some defines for these for archs that don't support it */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
extern int offline_pages(unsigned long, unsigned long, unsigned long);

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);

/*
 * Walk through all memory which is registered as resource.
 * arg is (start_pfn, nr_pages, private_arg_pointer); func is invoked once
 * per matching resource range and may stop the walk by returning non-zero.
 * (NOTE(review): stop-on-nonzero is the usual kernel walker convention --
 * confirm against the walk_memory_resource() definition.)
 */
extern int walk_memory_resource(unsigned long start_pfn,
			unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *));
/*
 * memory_add_physaddr_to_nid - map the physical address of hot-added
 * memory to a NUMA node id.  On !CONFIG_NUMA kernels there is only node 0.
 */
#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif
#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * For supporting node-hotadd, we have to allocate a new pgdat.
 *
 * If an arch has generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well. But it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
 *
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

/* Arch has no special needs: fall through to the generic helpers below. */
#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
 * XXX: kmalloc_node() can't work well to get new node's memory at this time.
 * Because, pgdat for the new node is not allocated/initialized yet itself.
 * To use new node's memory, more consideration will be necessary.
 */
#define generic_alloc_nodedata(nid) \
({ \
	kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
})
/*
 * This definition is just for error path in node hotadd.
 * For node hotremove, we have to replace this.
 */
#define generic_free_nodedata(pgdat) kfree(pgdat)

extern pg_data_t *node_data[];
/*
 * Publish a freshly allocated pgdat so NODE_DATA(nid) finds it.
 */
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called -- node hot-add cannot happen on a single-node kernel */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
/*
 * Bootmem-info bookkeeping for memory hotremove.  With SPARSEMEM_VMEMMAP
 * these are no-ops (the vmemmap variant is handled elsewhere); otherwise
 * real implementations are provided out of line.
 */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline void put_page_bootmem(struct page *page)
{
}
#else
extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page);
#endif
Dave Hansen208d54e2005-10-29 18:16:52 -0700169#else /* ! CONFIG_MEMORY_HOTPLUG */
170/*
171 * Stub functions for when hotplug is off
172 */
173static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
174static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
175static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
Dave Hansenbdc8cb92005-10-29 18:16:53 -0700176
177static inline unsigned zone_span_seqbegin(struct zone *zone)
178{
179 return 0;
180}
181static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
182{
183 return 0;
184}
185static inline void zone_span_writelock(struct zone *zone) {}
186static inline void zone_span_writeunlock(struct zone *zone) {}
187static inline void zone_seqlock_init(struct zone *zone) {}
Dave Hansen3947be12005-10-29 18:16:54 -0700188
189static inline int mhp_notimplemented(const char *func)
190{
191 printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
192 dump_stack();
193 return -ENOSYS;
194}
195
Yasunori Goto04753272008-04-28 02:13:31 -0700196static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
197{
198}
199
Dave Hansenbdc8cb92005-10-29 18:16:53 -0700200#endif /* ! CONFIG_MEMORY_HOTPLUG */
Andi Kleen9d99aaa2006-04-07 19:49:15 +0200201
Yasunori Gotobc02af92006-06-27 02:53:30 -0700202extern int add_memory(int nid, u64 start, u64 size);
203extern int arch_add_memory(int nid, u64 start, u64 size);
Andi Kleen9d99aaa2006-04-07 19:49:15 +0200204extern int remove_memory(u64 start, u64 size);
Keith Manntheyf28c5ed2006-09-30 23:27:04 -0700205extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
206 int nr_pages);
Badari Pulavartyea01ea92008-04-28 02:12:01 -0700207extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
Yasunori Goto04753272008-04-28 02:13:31 -0700208extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
209 unsigned long pnum);
Andi Kleen9d99aaa2006-04-07 19:49:15 +0200210
Dave Hansen208d54e2005-10-29 18:16:52 -0700211#endif /* __LINUX_MEMORY_HOTPLUG_H */