/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>

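/*
 * Allocate a zeroed memmap for one section's worth of pages.  Try the
 * page allocator first and fall back to vmalloc() if the high-order
 * allocation fails; the caller frees with free_pages() or vfree()
 * depending on which path succeeded.
 *
 * Illustrative arithmetic (numbers are arch-dependent, not from this
 * file): with 4 KiB pages, a 64 MiB section gives PAGES_PER_SECTION ==
 * 16384; at 32 bytes per struct page that is a 512 KiB memmap, i.e. an
 * order-7 page allocation.
 */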
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
			  unsigned long size);
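/*
 * Initialize the struct pages backing one new section and enter the
 * (node, zone) pair into the zone table so that page_zone() resolves
 * correctly for the new pfn range.
 */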
static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
}

extern int sparse_add_one_section(struct zone *, unsigned long,
				  struct page *mem_map);
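/*
 * Add one memory section: allocate its memmap, hand it to the
 * sparsemem code under the pgdat resize lock, then initialize the new
 * struct pages and register the section with the memory sysfs layer.
 * If sparse_add_one_section() does not consume the memmap (failure, or
 * the section already existed), the allocation is freed again here.
 */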
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	struct page *memmap;
	unsigned long flags;
	int ret;

	/*
	 * This can potentially allocate memory, and does its own
	 * internal locking.
	 */
	sparse_index_init(pfn_to_section_nr(phys_start_pfn), pgdat->node_id);

	pgdat_resize_lock(pgdat, &flags);
	memmap = __kmalloc_section_memmap(nr_pages);
	ret = sparse_add_one_section(zone, phys_start_pfn, memmap);
	pgdat_resize_unlock(pgdat, &flags);

	if (ret <= 0) {
		/* the mem_map didn't get used */
		if (memmap >= (struct page *)VMALLOC_START &&
		    memmap < (struct page *)VMALLOC_END)
			vfree(memmap);
		else
			free_pages((unsigned long)memmap,
				   get_order(sizeof(struct page) * nr_pages));
	}

	if (ret < 0)
		return ret;

	__add_zone(zone, phys_start_pfn);
	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		if (err)
			break;
	}

	return err;
}
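/*
 * Example (sketch, not part of this file): an arch's add_memory()
 * would typically pick a node and zone, then call __add_pages() for
 * the whole hot-added range, along the lines of:
 *
 *	int add_memory(u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(0);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		return __add_pages(zone, start_pfn, nr_pages);
 *	}
 *
 * Node and zone selection is arch policy; node 0 and ZONE_NORMAL here
 * are only placeholders.
 */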
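/*
 * Extend the zone's spanned pfn range to cover [start_pfn, end_pfn).
 * Takes the zone span writelock itself; the caller is expected to hold
 * the pgdat resize lock (as online_pages() below does).
 */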
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	if (end_pfn > old_zone_end_pfn)
		zone->spanned_pages = end_pfn - zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

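/*
 * Extend the node's spanned pfn range to cover [start_pfn, end_pfn).
 * The caller must hold the pgdat resize lock.
 */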
static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	if (end_pfn > old_pgdat_end_pfn)
		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
}

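/*
 * Bring nr_pages already-added pages starting at pfn online: grow the
 * zone and node spans to cover the range, then feed each page to the
 * page allocator via online_page() and account it in present_pages.
 */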
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn + i);
		online_page(page);
		onlined_pages++;
	}
	zone->present_pages += onlined_pages;

	return 0;
}