#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;

#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * Types for free bootmem pages, stored in page->lru.next. These have to
 * lie in an arbitrary but distinctive range of the unsigned long space so
 * that stray values are easy to recognize while debugging.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

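/*
 * Example (illustrative sketch, not part of this header): a consumer such
 * as put_page_bootmem() can recover the type from page->lru.next and
 * sanity-check it against the range above:
 *
 *	unsigned long type = (unsigned long)page->lru.next;
 *
 *	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
 *	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
 */
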
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
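
/*
 * Example (illustrative sketch): a hotplug writer updating a node's span
 * holds the resize lock around the update; node_start_pfn and
 * node_spanned_pages are fields of struct pglist_data, the new_* values
 * are hypothetical:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	pgdat->node_start_pfn = new_start_pfn;
 *	pgdat->node_spanned_pages = new_spanned_pages;
 *	pgdat_resize_unlock(pgdat, &flags);
 */
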
/*
 * Zone resizing functions
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
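
/*
 * Example (illustrative sketch): a lockless reader checking whether a pfn
 * falls inside a zone's span retries until it sees a consistent snapshot;
 * zone_start_pfn and spanned_pages are fields of struct zone:
 *
 *	unsigned seq;
 *	int inside;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		inside = pfn >= zone->zone_start_pfn &&
 *			 pfn < zone->zone_start_pfn + zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */
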
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by a firmware interface (e.g. ACPI) */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);

typedef void (*online_page_callback_t)(struct page *page);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);

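/*
 * Example (illustrative sketch): a driver that wants to intercept newly
 * onlined pages (e.g. a ballooning driver) can install its own callback,
 * reusing the __online_page_*() helpers for the default bookkeeping;
 * my_online_page() is a hypothetical name:
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	rc = restore_online_page_callback(&my_online_page);
 */
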
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
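
/*
 * Example (illustrative sketch, modelled on a typical arch_add_memory()
 * implementation): the architecture picks a target zone and converts the
 * physical range to pfns before handing it to __add_pages(); the choice
 * of ZONE_NORMAL here is an assumption for the sketch:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		... map the range in the kernel page tables ...
 *		return __add_pages(nid, zone, start_pfn, nr_pages);
 *	}
 */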

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * On an architecture whose NODE_DATA() is in the generic style,
 * node_data[nid] = kzalloc() works well, but whether that is possible
 * depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * For now, arch_free_nodedata() is only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * With ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate
 * the pgdat.
 * XXX: kmalloc_node() cannot allocate from the new node's memory at this
 * point, because the new node's pgdat has not been allocated/initialized
 * yet. Allocating from the new node's own memory will need more work.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition covers only the error path of node hot-add.
 * Node hot-remove will need to replace it.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

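/*
 * Example (illustrative sketch of the node hot-add path): the core
 * allocates the pgdat through the arch hooks and publishes it, tearing
 * it down again if hot-add fails:
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	(on failure)
 *	arch_refresh_nodedata(nid, NULL);
 *	arch_free_nodedata(pgdat);
 */
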
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline void put_page_bootmem(struct page *page)
{
}
#else
extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page);
#endif

/*
 * The memory hotplug lock guarantees that 1) all memory hotplug notifier
 * callbacks are invoked with the lock held, and 2) offline/online/add/
 * remove of memory never run concurrently with one another.
 */

void lock_memory_hotplug(void);
void unlock_memory_hotplug(void);

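/*
 * Example (illustrative sketch): code that must see a stable memory
 * layout brackets its work with the hotplug lock:
 *
 *	lock_memory_hotplug();
 *	... walk zones/sections without racing against hotplug ...
 *	unlock_memory_hotplug();
 */
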
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline void lock_memory_hotplug(void) {}
static inline void unlock_memory_hotplug(void) {}

#endif /* ! CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);

#else
static inline int is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern int mem_online_node(int nid);
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
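
/*
 * Example (illustrative sketch): a firmware driver such as the ACPI
 * memory hotplug driver resolves a node for the new range and hands the
 * physical range to add_memory(); start_addr and length stand in for
 * values reported by the firmware:
 *
 *	nid = memory_add_physaddr_to_nid(start_addr);
 *	result = add_memory(nid, start_addr, length);
 */
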
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
								int nr_pages);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);

#endif /* __LINUX_MEMORY_HOTPLUG_H */