/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalhigh_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
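
/*
 * Worked example (our own, not from the original source): with the default
 * ratios above, a HIGHMEM allocation on the 1G machine reserves
 * 224M/32 = 7M of ZONE_NORMAL and (224M+784M)/256 ~= 3.9M of ZONE_DMA,
 * while a NORMAL allocation reserves 784M/256 ~= 3M of ZONE_DMA.  Raising
 * a ratio shrinks the corresponding reservation.
 */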

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}

#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
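
/*
 * Illustrative layout (our own example, not from the original source): an
 * order-2 compound page consists of page[0]..page[3]; each page[i] has
 * PG_compound set and page_private(page[i]) == (unsigned long)&page[0];
 * page[1].lru.next holds the destructor (free_compound_page below) and
 * page[1].lru.prev holds the order, 2.
 */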

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order-(O+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order-1
 * buddy (buddy2) is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
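
/*
 * Worked example (our own, not from the original source): for page_idx 12
 * at order 2, __page_find_buddy() computes buddy_idx = 12 ^ (1 << 2) = 8,
 * and __find_combined_index() gives 12 & ~(1 << 2) = 8, i.e. the merged
 * order-3 block starts at index 8 and spans indices 8..15.
 */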

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(buddy)))
		return 0;
#endif

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy.  A page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */
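
/*
 * Example trace (our own, not from the original source): freeing the
 * order-0 page at index 9 while index 8 is already free at order 0 merges
 * them into an order-1 block at index 8 (buddy 9 ^ 1 = 8, combined index
 * 9 & ~1 = 8); if indices 10-11 are free at order 1 as well, the loop
 * below merges again into an order-2 block at index 8 (buddy 8 ^ 2 = 10),
 * and so on up to MAX_ORDER-1.
 */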

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	BUG_ON(page_idx & (order_size - 1));
	BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	LIST_HEAD(list);
	list_add(&page->lru, &list);
	free_pages_bulk(zone, 1, &list, order);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE<<order);

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing.  Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function.  This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
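
/*
 * Example trace (our own, not from the original source): expand(zone, page,
 * 0, 2, area) splits an order-2 block of four pages by first putting
 * page[2] on the order-1 free list, then page[1] on the order-0 free list,
 * leaving page[0] for the caller; the larger remainders are queued before
 * the smaller ones, which is the delivery order described above.
 */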

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area *area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belong to the currently executing processor.
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_node_pages(int nodeid)
{
	int i, z;
	unsigned long flags;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count) {
				local_irq_save(flags);
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i = 0; i < (1 << order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn + i));
		}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);
}
#endif /* CONFIG_PM */

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n = (1 << order) individual sub-pages: page[0] .. page[n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	BUG_ON(PageCompound(page));
	BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
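
/*
 * Usage sketch (our own illustration, not from the original source):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page)
 *		split_page(page, 2);
 *
 * After the split, page+0 .. page+3 each hold their own reference and can
 * be freed independently with __free_page().
 */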

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();

	BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
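
/*
 * Example combination (our own note, derived from __alloc_pages() below):
 * a GFP_ATOMIC request ends up with ALLOC_WMARK_MIN | ALLOC_HARDER |
 * ALLOC_HIGH and no ALLOC_CPUSET, since it cannot wait; see the
 * alloc_flags setup in __alloc_pages().
 */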

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
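
/*
 * Worked example (our own, not from the original source): for an order-2
 * request with mark = 128, no lowmem_reserve and neither ALLOC_HIGH nor
 * ALLOC_HARDER: first the zone's free pages minus (1 << 2) - 1 must exceed
 * 128; then, after discounting order-0 free pages, the remainder must
 * exceed 128 >> 1 = 64; after also discounting order-1 free pages, it must
 * exceed 32.  ALLOC_HIGH halves the mark and ALLOC_HARDER cuts a further
 * quarter before these checks.
 */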

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(*z, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = (*z)->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = (*z)->pages_low;
			else
				mark = (*z)->pages_high;
			if (!zone_watermark_ok(*z, order, mark,
				    classzone_idx, alloc_flags))
				if (!zone_reclaim_mode ||
				    !zone_reclaim(*z, gfp_mask, order))
					continue;
		}

		page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
		if (page) {
			break;
		}
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)
		alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
| 1014 | goto got_pg; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { |
| 1016 | /* |
| 1017 | * Go through the zonelist yet one more time, keeping |
| 1018 | * a very high watermark here; this is only to catch |
| 1019 | * a parallel oom killing, and we must fail if we're |
| 1020 | * still under heavy pressure. |
| 1021 | */ |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1022 | page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, |
Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 1023 | zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); |
Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1024 | if (page) |
| 1025 | goto got_pg; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1026 | |
Christoph Lameter | 9b0f8b0 | 2006-02-20 18:27:52 -0800 | [diff] [blame] | 1027 | out_of_memory(zonelist, gfp_mask, order); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1028 | goto restart; |
| 1029 | } |
| 1030 | |
| 1031 | /* |
| 1032 | * Don't let big-order allocations loop unless the caller explicitly |
| 1033 | * requests that. Wait for some write requests to complete then retry. |
| 1034 | * |
| 1035 | * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order |
| 1036 | * <= 3, but that may not be true in other implementations. |
| 1037 | */ |
| 1038 | do_retry = 0; |
| 1039 | if (!(gfp_mask & __GFP_NORETRY)) { |
| 1040 | if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) |
| 1041 | do_retry = 1; |
| 1042 | if (gfp_mask & __GFP_NOFAIL) |
| 1043 | do_retry = 1; |
| 1044 | } |
| 1045 | if (do_retry) { |
| 1046 | blk_congestion_wait(WRITE, HZ/50); |
| 1047 | goto rebalance; |
| 1048 | } |
| 1049 | |
| 1050 | nopage: |
| 1051 | if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { |
| 1052 | printk(KERN_WARNING "%s: page allocation failure." |
| 1053 | " order:%d, mode:0x%x\n", |
| 1054 | p->comm, order, gfp_mask); |
| 1055 | dump_stack(); |
Janet Morgan | 578c2fd | 2005-06-21 17:14:56 -0700 | [diff] [blame] | 1056 | show_mem(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1057 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 | got_pg: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | return page; |
| 1060 | } |
| 1061 | |
| 1062 | EXPORT_SYMBOL(__alloc_pages); |
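/*
 * Editorial usage sketch, not part of the original source: most callers
 * reach __alloc_pages() through the alloc_pages() wrapper, which picks
 * the current node's zonelist for the given gfp_mask. The order-2
 * request and error handling below are purely illustrative.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 4 contiguous pages */
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);	/* order must match the allocation */
 */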
| 1063 | |
| 1064 | /* |
| 1065 | * Common helper functions. |
| 1066 | */ |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1067 | fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | { |
| 1069 | struct page * page; |
| 1070 | page = alloc_pages(gfp_mask, order); |
| 1071 | if (!page) |
| 1072 | return 0; |
| 1073 | return (unsigned long) page_address(page); |
| 1074 | } |
| 1075 | |
| 1076 | EXPORT_SYMBOL(__get_free_pages); |
| 1077 | |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1078 | fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1079 | { |
| 1080 | struct page * page; |
| 1081 | |
| 1082 | /* |
| 1083 | * get_zeroed_page() returns a direct-mapped kernel virtual address, |
| 1084 | * which cannot represent a highmem page |
| 1085 | */ |
Al Viro | 260b236 | 2005-10-21 03:22:44 -0400 | [diff] [blame] | 1086 | BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | |
| 1088 | page = alloc_pages(gfp_mask | __GFP_ZERO, 0); |
| 1089 | if (page) |
| 1090 | return (unsigned long) page_address(page); |
| 1091 | return 0; |
| 1092 | } |
| 1093 | |
| 1094 | EXPORT_SYMBOL(get_zeroed_page); |
| 1095 | |
| 1096 | void __pagevec_free(struct pagevec *pvec) |
| 1097 | { |
| 1098 | int i = pagevec_count(pvec); |
| 1099 | |
| 1100 | while (--i >= 0) |
| 1101 | free_hot_cold_page(pvec->pages[i], pvec->cold); |
| 1102 | } |
| 1103 | |
| 1104 | fastcall void __free_pages(struct page *page, unsigned int order) |
| 1105 | { |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1106 | if (put_page_testzero(page)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1107 | if (order == 0) |
| 1108 | free_hot_page(page); |
| 1109 | else |
| 1110 | __free_pages_ok(page, order); |
| 1111 | } |
| 1112 | } |
| 1113 | |
| 1114 | EXPORT_SYMBOL(__free_pages); |
| 1115 | |
| 1116 | fastcall void free_pages(unsigned long addr, unsigned int order) |
| 1117 | { |
| 1118 | if (addr != 0) { |
| 1119 | BUG_ON(!virt_addr_valid((void *)addr)); |
| 1120 | __free_pages(virt_to_page((void *)addr), order); |
| 1121 | } |
| 1122 | } |
| 1123 | |
| 1124 | EXPORT_SYMBOL(free_pages); |
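/*
 * Editorial usage sketch, not part of the original source: the
 * address-based helpers above pair up as shown. get_zeroed_page()
 * implies order 0, so its result is released with free_page(), the
 * order-0 shorthand for free_pages().
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);	/* 2 pages */
 *	unsigned long zp = get_zeroed_page(GFP_KERNEL);		/* 1 zeroed page */
 *	if (addr)
 *		free_pages(addr, 1);
 *	if (zp)
 *		free_page(zp);
 */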
| 1125 | |
| 1126 | /* |
| 1127 | * Total amount of free (allocatable) RAM: |
| 1128 | */ |
| 1129 | unsigned int nr_free_pages(void) |
| 1130 | { |
| 1131 | unsigned int sum = 0; |
| 1132 | struct zone *zone; |
| 1133 | |
| 1134 | for_each_zone(zone) |
| 1135 | sum += zone->free_pages; |
| 1136 | |
| 1137 | return sum; |
| 1138 | } |
| 1139 | |
| 1140 | EXPORT_SYMBOL(nr_free_pages); |
| 1141 | |
| 1142 | #ifdef CONFIG_NUMA |
| 1143 | unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) |
| 1144 | { |
| 1145 | unsigned int i, sum = 0; |
| 1146 | |
| 1147 | for (i = 0; i < MAX_NR_ZONES; i++) |
| 1148 | sum += pgdat->node_zones[i].free_pages; |
| 1149 | |
| 1150 | return sum; |
| 1151 | } |
| 1152 | #endif |
| 1153 | |
| 1154 | static unsigned int nr_free_zone_pages(int offset) |
| 1155 | { |
Martin J. Bligh | e310fd4 | 2005-07-29 22:59:18 -0700 | [diff] [blame] | 1156 | /* Just pick one node, since fallback list is circular */ |
| 1157 | pg_data_t *pgdat = NODE_DATA(numa_node_id()); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 | unsigned int sum = 0; |
| 1159 | |
Martin J. Bligh | e310fd4 | 2005-07-29 22:59:18 -0700 | [diff] [blame] | 1160 | struct zonelist *zonelist = pgdat->node_zonelists + offset; |
| 1161 | struct zone **zonep = zonelist->zones; |
| 1162 | struct zone *zone; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1163 | |
Martin J. Bligh | e310fd4 | 2005-07-29 22:59:18 -0700 | [diff] [blame] | 1164 | for (zone = *zonep++; zone; zone = *zonep++) { |
| 1165 | unsigned long size = zone->present_pages; |
| 1166 | unsigned long high = zone->pages_high; |
| 1167 | if (size > high) |
| 1168 | sum += size - high; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | } |
| 1170 | |
| 1171 | return sum; |
| 1172 | } |
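/*
 * Editorial worked example with hypothetical numbers: for a zonelist
 * holding a ZONE_NORMAL of 200000 present pages (pages_high = 1000) and
 * a ZONE_DMA of 4000 present pages (pages_high = 100), the sum is
 * (200000 - 1000) + (4000 - 100) = 202900 pages -- what can be handed
 * out without pushing any zone below its high watermark.
 */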
| 1173 | |
| 1174 | /* |
| 1175 | * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL |
| 1176 | */ |
| 1177 | unsigned int nr_free_buffer_pages(void) |
| 1178 | { |
Al Viro | af4ca45 | 2005-10-21 02:55:38 -0400 | [diff] [blame] | 1179 | return nr_free_zone_pages(gfp_zone(GFP_USER)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1180 | } |
| 1181 | |
| 1182 | /* |
| 1183 | * Amount of free RAM allocatable within all zones |
| 1184 | */ |
| 1185 | unsigned int nr_free_pagecache_pages(void) |
| 1186 | { |
Al Viro | af4ca45 | 2005-10-21 02:55:38 -0400 | [diff] [blame] | 1187 | return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | } |
| 1189 | |
| 1190 | #ifdef CONFIG_HIGHMEM |
| 1191 | unsigned int nr_free_highpages (void) |
| 1192 | { |
| 1193 | pg_data_t *pgdat; |
| 1194 | unsigned int pages = 0; |
| 1195 | |
KAMEZAWA Hiroyuki | ec936fc | 2006-03-27 01:15:59 -0800 | [diff] [blame] | 1196 | for_each_online_pgdat(pgdat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages; |
| 1198 | |
| 1199 | return pages; |
| 1200 | } |
| 1201 | #endif |
| 1202 | |
| 1203 | #ifdef CONFIG_NUMA |
| 1204 | static void show_node(struct zone *zone) |
| 1205 | { |
| 1206 | printk("Node %d ", zone->zone_pgdat->node_id); |
| 1207 | } |
| 1208 | #else |
| 1209 | #define show_node(zone) do { } while (0) |
| 1210 | #endif |
| 1211 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | void si_meminfo(struct sysinfo *val) |
| 1213 | { |
| 1214 | val->totalram = totalram_pages; |
| 1215 | val->sharedram = 0; |
| 1216 | val->freeram = nr_free_pages(); |
| 1217 | val->bufferram = nr_blockdev_pages(); |
| 1218 | #ifdef CONFIG_HIGHMEM |
| 1219 | val->totalhigh = totalhigh_pages; |
| 1220 | val->freehigh = nr_free_highpages(); |
| 1221 | #else |
| 1222 | val->totalhigh = 0; |
| 1223 | val->freehigh = 0; |
| 1224 | #endif |
| 1225 | val->mem_unit = PAGE_SIZE; |
| 1226 | } |
| 1227 | |
| 1228 | EXPORT_SYMBOL(si_meminfo); |
| 1229 | |
| 1230 | #ifdef CONFIG_NUMA |
| 1231 | void si_meminfo_node(struct sysinfo *val, int nid) |
| 1232 | { |
| 1233 | pg_data_t *pgdat = NODE_DATA(nid); |
| 1234 | |
| 1235 | val->totalram = pgdat->node_present_pages; |
| 1236 | val->freeram = nr_free_pages_pgdat(pgdat); |
| 1237 | val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; |
| 1238 | val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; |
| 1239 | val->mem_unit = PAGE_SIZE; |
| 1240 | } |
| 1241 | #endif |
| 1242 | |
| 1243 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
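/*
 * Editorial example: K() converts a page count to kilobytes. With 4K
 * pages (PAGE_SHIFT == 12) K(x) == x << 2, so K(256) == 1024; with 16K
 * pages (PAGE_SHIFT == 14) K(256) == 4096.
 */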
| 1244 | |
| 1245 | /* |
| 1246 | * Show free area list (used by the Shift-ScrollLock show-memory handler). |
| 1247 | * We also give an indication of fragmentation by printing, per order, |
| 1248 | * how much memory sits on each free list. |
| 1249 | */ |
| 1250 | void show_free_areas(void) |
| 1251 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1252 | int cpu, temperature; |
| 1253 | unsigned long active; |
| 1254 | unsigned long inactive; |
| 1255 | unsigned long free; |
| 1256 | struct zone *zone; |
| 1257 | |
| 1258 | for_each_zone(zone) { |
| 1259 | show_node(zone); |
| 1260 | printk("%s per-cpu:", zone->name); |
| 1261 | |
Con Kolivas | f3fe651 | 2006-01-06 00:11:15 -0800 | [diff] [blame] | 1262 | if (!populated_zone(zone)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | printk(" empty\n"); |
| 1264 | continue; |
| 1265 | } else |
| 1266 | printk("\n"); |
| 1267 | |
Dave Jones | 6b482c6 | 2005-11-10 15:45:56 -0500 | [diff] [blame] | 1268 | for_each_online_cpu(cpu) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | struct per_cpu_pageset *pageset; |
| 1270 | |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1271 | pageset = zone_pcp(zone, cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | |
| 1273 | for (temperature = 0; temperature < 2; temperature++) |
Nick Piggin | 2d92c5c | 2006-01-06 00:10:59 -0800 | [diff] [blame] | 1274 | printk("cpu %d %s: high %d, batch %d used:%d\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 | cpu, |
| 1276 | temperature ? "cold" : "hot", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1277 | pageset->pcp[temperature].high, |
Christoph Lameter | 4ae7c03 | 2005-06-21 17:14:57 -0700 | [diff] [blame] | 1278 | pageset->pcp[temperature].batch, |
| 1279 | pageset->pcp[temperature].count); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1280 | } |
| 1281 | } |
| 1282 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | get_zone_counts(&active, &inactive, &free); |
| 1284 | |
Denis Vlasenko | c0d6221 | 2005-06-21 17:15:14 -0700 | [diff] [blame] | 1285 | printk("Free pages: %11ukB (%ukB HighMem)\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | K(nr_free_pages()), |
| 1287 | K(nr_free_highpages())); |
| 1288 | |
| 1289 | printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu " |
| 1290 | "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", |
| 1291 | active, |
| 1292 | inactive, |
Christoph Lameter | b1e7a8f | 2006-06-30 01:55:39 -0700 | [diff] [blame] | 1293 | global_page_state(NR_FILE_DIRTY), |
Christoph Lameter | ce866b3 | 2006-06-30 01:55:40 -0700 | [diff] [blame] | 1294 | global_page_state(NR_WRITEBACK), |
Christoph Lameter | fd39fc8 | 2006-06-30 01:55:40 -0700 | [diff] [blame] | 1295 | global_page_state(NR_UNSTABLE_NFS), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | nr_free_pages(), |
Christoph Lameter | 9a865ff | 2006-06-30 01:55:38 -0700 | [diff] [blame] | 1297 | global_page_state(NR_SLAB), |
Christoph Lameter | 65ba55f | 2006-06-30 01:55:34 -0700 | [diff] [blame] | 1298 | global_page_state(NR_FILE_MAPPED), |
Christoph Lameter | df849a1 | 2006-06-30 01:55:38 -0700 | [diff] [blame] | 1299 | global_page_state(NR_PAGETABLE)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | |
| 1301 | for_each_zone(zone) { |
| 1302 | int i; |
| 1303 | |
| 1304 | show_node(zone); |
| 1305 | printk("%s" |
| 1306 | " free:%lukB" |
| 1307 | " min:%lukB" |
| 1308 | " low:%lukB" |
| 1309 | " high:%lukB" |
| 1310 | " active:%lukB" |
| 1311 | " inactive:%lukB" |
| 1312 | " present:%lukB" |
| 1313 | " pages_scanned:%lu" |
| 1314 | " all_unreclaimable? %s" |
| 1315 | "\n", |
| 1316 | zone->name, |
| 1317 | K(zone->free_pages), |
| 1318 | K(zone->pages_min), |
| 1319 | K(zone->pages_low), |
| 1320 | K(zone->pages_high), |
| 1321 | K(zone->nr_active), |
| 1322 | K(zone->nr_inactive), |
| 1323 | K(zone->present_pages), |
| 1324 | zone->pages_scanned, |
| 1325 | (zone->all_unreclaimable ? "yes" : "no") |
| 1326 | ); |
| 1327 | printk("lowmem_reserve[]:"); |
| 1328 | for (i = 0; i < MAX_NR_ZONES; i++) |
| 1329 | printk(" %lu", zone->lowmem_reserve[i]); |
| 1330 | printk("\n"); |
| 1331 | } |
| 1332 | |
| 1333 | for_each_zone(zone) { |
Kirill Korotaev | 8f9de51 | 2006-06-23 02:03:50 -0700 | [diff] [blame] | 1334 | unsigned long nr[MAX_ORDER], flags, order, total = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1335 | |
| 1336 | show_node(zone); |
| 1337 | printk("%s: ", zone->name); |
Con Kolivas | f3fe651 | 2006-01-06 00:11:15 -0800 | [diff] [blame] | 1338 | if (!populated_zone(zone)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1339 | printk("empty\n"); |
| 1340 | continue; |
| 1341 | } |
| 1342 | |
| 1343 | spin_lock_irqsave(&zone->lock, flags); |
| 1344 | for (order = 0; order < MAX_ORDER; order++) { |
Kirill Korotaev | 8f9de51 | 2006-06-23 02:03:50 -0700 | [diff] [blame] | 1345 | nr[order] = zone->free_area[order].nr_free; |
| 1346 | total += nr[order] << order; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | } |
| 1348 | spin_unlock_irqrestore(&zone->lock, flags); |
Kirill Korotaev | 8f9de51 | 2006-06-23 02:03:50 -0700 | [diff] [blame] | 1349 | for (order = 0; order < MAX_ORDER; order++) |
| 1350 | printk("%lu*%lukB ", nr[order], K(1UL) << order); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1351 | printk("= %lukB\n", K(total)); |
| 1352 | } |
| 1353 | |
| 1354 | show_swap_cache_info(); |
| 1355 | } |
| 1356 | |
| 1357 | /* |
| 1358 | * Builds allocation fallback zone lists. |
Christoph Lameter | 1a93205 | 2006-01-06 00:11:16 -0800 | [diff] [blame] | 1359 | * |
| 1360 | * Add all populated zones of a node to the zonelist. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1361 | */ |
Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1362 | static int __meminit build_zonelists_node(pg_data_t *pgdat, |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1363 | struct zonelist *zonelist, int nr_zones, int zone_type) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | { |
Christoph Lameter | 1a93205 | 2006-01-06 00:11:16 -0800 | [diff] [blame] | 1365 | struct zone *zone; |
| 1366 | |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1367 | BUG_ON(zone_type > ZONE_HIGHMEM); |
Christoph Lameter | 02a68a5 | 2006-01-06 00:11:18 -0800 | [diff] [blame] | 1368 | |
| 1369 | do { |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1370 | zone = pgdat->node_zones + zone_type; |
Christoph Lameter | 1a93205 | 2006-01-06 00:11:16 -0800 | [diff] [blame] | 1371 | if (populated_zone(zone)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | #ifndef CONFIG_HIGHMEM |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1373 | BUG_ON(zone_type > ZONE_NORMAL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1374 | #endif |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1375 | zonelist->zones[nr_zones++] = zone; |
| 1376 | check_highest_zone(zone_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | } |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1378 | zone_type--; |
Christoph Lameter | 02a68a5 | 2006-01-06 00:11:18 -0800 | [diff] [blame] | 1379 | |
Christoph Lameter | 070f803 | 2006-01-06 00:11:19 -0800 | [diff] [blame] | 1380 | } while (zone_type >= 0); |
| 1381 | return nr_zones; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1382 | } |
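/*
 * Editorial example, assuming a node with every zone populated: called
 * with zone_type == ZONE_HIGHMEM, the loop above walks downward and
 * appends the zones in the order HIGHMEM, NORMAL, DMA32, DMA, so the
 * fallback runs from the least to the most precious memory.
 */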
| 1383 | |
Al Viro | 260b236 | 2005-10-21 03:22:44 -0400 | [diff] [blame] | 1384 | static inline int highest_zone(int zone_bits) |
| 1385 | { |
| 1386 | int res = ZONE_NORMAL; |
| 1387 | if (zone_bits & (__force int)__GFP_HIGHMEM) |
| 1388 | res = ZONE_HIGHMEM; |
Andi Kleen | a2f1b42 | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 1389 | if (zone_bits & (__force int)__GFP_DMA32) |
| 1390 | res = ZONE_DMA32; |
Al Viro | 260b236 | 2005-10-21 03:22:44 -0400 | [diff] [blame] | 1391 | if (zone_bits & (__force int)__GFP_DMA) |
| 1392 | res = ZONE_DMA; |
| 1393 | return res; |
| 1394 | } |
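/*
 * Editorial example: the tests above are ordered so the most restrictive
 * zone modifier wins -- highest_zone() of a mask with both __GFP_HIGHMEM
 * and __GFP_DMA set returns ZONE_DMA, since the __GFP_DMA check runs
 * last and overwrites the earlier ZONE_HIGHMEM result.
 */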
| 1395 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 | #ifdef CONFIG_NUMA |
| 1397 | #define MAX_NODE_LOAD (num_online_nodes()) |
Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1398 | static int __meminitdata node_load[MAX_NUMNODES]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 | /** |
Pavel Pisa | 4dc3b16 | 2005-05-01 08:59:25 -0700 | [diff] [blame] | 1400 | * find_next_best_node - find the next node that should appear in a given node's fallback list |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 | * @node: node whose fallback list we're appending |
| 1402 | * @used_node_mask: nodemask_t of already used nodes |
| 1403 | * |
| 1404 | * We use a number of factors to determine which is the next node that should |
| 1405 | * appear on a given node's fallback list. The node should not have appeared |
| 1406 | * already in @node's fallback list, and it should be the next closest node |
| 1407 | * according to the distance array (which contains arbitrary distance values |
| 1408 | * from each node to each node in the system); we should also prefer nodes |
| 1409 | * with no CPUs, since presumably they'll have very little allocation pressure |
| 1410 | * on them otherwise. |
| 1411 | * It returns -1 if no node is found. |
| 1412 | */ |
Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1413 | static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1414 | { |
Linus Torvalds | 4cf808eb | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 1415 | int n, val; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1416 | int min_val = INT_MAX; |
| 1417 | int best_node = -1; |
| 1418 | |
Linus Torvalds | 4cf808eb | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 1419 | /* Use the local node if we haven't already */ |
| 1420 | if (!node_isset(node, *used_node_mask)) { |
| 1421 | node_set(node, *used_node_mask); |
| 1422 | return node; |
| 1423 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1424 | |
Linus Torvalds | 4cf808eb | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 1425 | for_each_online_node(n) { |
| 1426 | cpumask_t tmp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | |
| 1428 | /* Don't want a node to appear more than once */ |
| 1429 | if (node_isset(n, *used_node_mask)) |
| 1430 | continue; |
| 1431 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1432 | /* Use the distance array to find the distance */ |
| 1433 | val = node_distance(node, n); |
| 1434 | |
Linus Torvalds | 4cf808eb | 2006-02-17 20:38:21 +0100 | [diff] [blame] | 1435 | /* Penalize nodes under us ("prefer the next node") */ |
| 1436 | val += (n < node); |
| 1437 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1438 | /* Give preference to headless and unused nodes */ |
| 1439 | tmp = node_to_cpumask(n); |
| 1440 | if (!cpus_empty(tmp)) |
| 1441 | val += PENALTY_FOR_NODE_WITH_CPUS; |
| 1442 | |
| 1443 | /* Slight preference for less loaded node */ |
| 1444 | val *= (MAX_NODE_LOAD*MAX_NUMNODES); |
| 1445 | val += node_load[n]; |
| 1446 | |
| 1447 | if (val < min_val) { |
| 1448 | min_val = val; |
| 1449 | best_node = n; |
| 1450 | } |
| 1451 | } |
| 1452 | |
| 1453 | if (best_node >= 0) |
| 1454 | node_set(best_node, *used_node_mask); |
| 1455 | |
| 1456 | return best_node; |
| 1457 | } |
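/*
 * Editorial worked example on a hypothetical two-node system: node 0 is
 * already in used_node_mask, so only n == 1 is scored. With
 * node_distance(0, 1) == 20, no "node below us" penalty (1 < 0 is
 * false), and CPUs present on node 1 adding PENALTY_FOR_NODE_WITH_CPUS
 * (arch-defined, typically 1), val is 21, scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES with node_load[1] as a tiebreak. As the
 * only candidate it wins, is marked in used_node_mask, and is returned.
 */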
| 1458 | |
Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1459 | static void __meminit build_zonelists(pg_data_t *pgdat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | { |
| 1461 | int i, j, k, node, local_node; |
| 1462 | int prev_node, load; |
| 1463 | struct zonelist *zonelist; |
| 1464 | nodemask_t used_mask; |
| 1465 | |
| 1466 | /* initialize zonelists */ |
| 1467 | for (i = 0; i < GFP_ZONETYPES; i++) { |
| 1468 | zonelist = pgdat->node_zonelists + i; |
| 1469 | zonelist->zones[0] = NULL; |
| 1470 | } |
| 1471 | |
| 1472 | /* NUMA-aware ordering of nodes */ |
| 1473 | local_node = pgdat->node_id; |
| 1474 | load = num_online_nodes(); |
| 1475 | prev_node = local_node; |
| 1476 | nodes_clear(used_mask); |
| 1477 | while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { |
Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1478 | int distance = node_distance(local_node, node); |
| 1479 | |
| 1480 | /* |
| 1481 | * If another node is sufficiently far away then it is better |
| 1482 | * to reclaim pages in a zone before going off node. |
| 1483 | */ |
| 1484 | if (distance > RECLAIM_DISTANCE) |
| 1485 | zone_reclaim_mode = 1; |
| 1486 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1487 | /* |
| 1488 | * We don't want to pressure a particular node. |
| 1489 | * So adding penalty to the first node in same |
| 1490 | * distance group to make it round-robin. |
| 1491 | */ |
Christoph Lameter | 9eeff23 | 2006-01-18 17:42:31 -0800 | [diff] [blame] | 1492 | |
| 1493 | if (distance != node_distance(local_node, prev_node)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 | node_load[node] += load; |
| 1495 | prev_node = node; |
| 1496 | load--; |
| 1497 | for (i = 0; i < GFP_ZONETYPES; i++) { |
| 1498 | zonelist = pgdat->node_zonelists + i; |
| 1499 | for (j = 0; zonelist->zones[j] != NULL; j++); |
| 1500 | |
Al Viro | 260b236 | 2005-10-21 03:22:44 -0400 | [diff] [blame] | 1501 | k = highest_zone(i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1502 | |
| 1503 | j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); |
| 1504 | zonelist->zones[j] = NULL; |
| 1505 | } |
| 1506 | } |
| 1507 | } |
| 1508 | |
| 1509 | #else /* CONFIG_NUMA */ |
| 1510 | |
Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1511 | static void __meminit build_zonelists(pg_data_t *pgdat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | { |
| 1513 | int i, j, k, node, local_node; |
| 1514 | |
| 1515 | local_node = pgdat->node_id; |
| 1516 | for (i = 0; i < GFP_ZONETYPES; i++) { |
| 1517 | struct zonelist *zonelist; |
| 1518 | |
| 1519 | zonelist = pgdat->node_zonelists + i; |
| 1520 | |
| 1521 | j = 0; |
Al Viro | 260b236 | 2005-10-21 03:22:44 -0400 | [diff] [blame] | 1522 | k = highest_zone(i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | j = build_zonelists_node(pgdat, zonelist, j, k); |
| 1524 | /* |
| 1525 | * Now we build the zonelist so that it contains the zones |
| 1526 | * of all the other nodes. |
| 1527 | * We don't want to pressure a particular node, so when |
| 1528 | * building the zones for node N, we make sure that the |
| 1529 | * zones coming right after the local ones are those from |
| 1530 | * node N+1 (modulo N) |
| 1531 | */ |
| 1532 | for (node = local_node + 1; node < MAX_NUMNODES; node++) { |
| 1533 | if (!node_online(node)) |
| 1534 | continue; |
| 1535 | j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); |
| 1536 | } |
| 1537 | for (node = 0; node < local_node; node++) { |
| 1538 | if (!node_online(node)) |
| 1539 | continue; |
| 1540 | j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); |
| 1541 | } |
| 1542 | |
| 1543 | zonelist->zones[j] = NULL; |
| 1544 | } |
| 1545 | } |
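/*
 * Editorial example, assuming a flat four-node setup: for
 * local_node == 2 the two loops above append nodes in the order
 * 2, 3, 0, 1, so each node's fallback starts with its own zones and
 * then round-robins through the others instead of always landing on
 * node 0 first.
 */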
| 1546 | |
| 1547 | #endif /* CONFIG_NUMA */ |
| 1548 | |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 1549 | /* Return type is int only to match the stop_machine_run() callback */ |
| 1550 | static int __meminit __build_all_zonelists(void *dummy) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 | { |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 1552 | int nid; |
| 1553 | for_each_online_node(nid) |
| 1554 | build_zonelists(NODE_DATA(nid)); |
| 1555 | return 0; |
| 1556 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 | |
Yasunori Goto | 6811378 | 2006-06-23 02:03:11 -0700 | [diff] [blame] | 1558 | void __meminit build_all_zonelists(void) |
| 1559 | { |
| 1560 | if (system_state == SYSTEM_BOOTING) { |
| 1561 | __build_all_zonelists(0); |
| 1562 | cpuset_init_current_mems_allowed(); |
| 1563 | } else { |
| 1564 | /* we have to stop all cpus to guarantee there is no user |
| 1565 | of zonelist */ |
| 1566 | stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); |
| 1567 | /* cpuset refresh routine should be here */ |
| 1568 | } |
Andrew Morton | bd1e22b | 2006-06-23 02:03:47 -0700 | [diff] [blame] | 1569 | vm_total_pages = nr_free_pagecache_pages(); |
| 1570 | printk("Built %i zonelists. Total pages: %ld\n", |
| 1571 | num_online_nodes(), vm_total_pages); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | } |
| 1573 | |
| 1574 | /* |
| 1575 | * Helper functions to size the waitqueue hash table. |
| 1576 | * Essentially these want to choose hash table sizes sufficiently |
| 1577 | * large so that collisions trying to wait on pages are rare. |
| 1578 | * But in fact, the number of active page waitqueues on typical |
| 1579 | * systems is ridiculously low, less than 200. So this is even |
| 1580 | * conservative, even though it seems large. |
| 1581 | * |
| 1582 | * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to |
| 1583 | * waitqueues, i.e. the size of the waitq table given the number of pages. |
| 1584 | */ |
| 1585 | #define PAGES_PER_WAITQUEUE 256 |
| 1586 | |
Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1587 | #ifndef CONFIG_MEMORY_HOTPLUG |
Yasunori Goto | 02b694d | 2006-06-23 02:03:08 -0700 | [diff] [blame] | 1588 | static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | { |
| 1590 | unsigned long size = 1; |
| 1591 | |
| 1592 | pages /= PAGES_PER_WAITQUEUE; |
| 1593 | |
| 1594 | while (size < pages) |
| 1595 | size <<= 1; |
| 1596 | |
| 1597 | /* |
| 1598 | * Once we have dozens or even hundreds of threads sleeping |
| 1599 | * on IO we've got bigger problems than wait queue collision. |
| 1600 | * Limit the size of the wait table to a reasonable size. |
| 1601 | */ |
| 1602 | size = min(size, 4096UL); |
| 1603 | |
| 1604 | return max(size, 4UL); |
| 1605 | } |
Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1606 | #else |
| 1607 | /* |
| 1608 | * A zone's size might be changed by hot-add, so it is not possible to determine |
| 1609 | * a suitable size for its wait_table. So we use the maximum size now. |
| 1610 | * |
| 1611 | * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: |
| 1612 | * |
| 1613 | * i386 (preemption config) : 4096 x 16 = 64Kbyte. |
| 1614 | * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. |
| 1615 | * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. |
| 1616 | * |
| 1617 | * The maximum entries are prepared when a zone's memory is (512K + 256) pages |
| 1618 | * or more by the traditional way. (See above). It equals: |
| 1619 | * |
| 1620 | * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. |
| 1621 | * ia64(16K page size) : = ( 8G + 4M)byte. |
| 1622 | * powerpc (64K page size) : = (32G +16M)byte. |
| 1623 | */ |
| 1624 | static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) |
| 1625 | { |
| 1626 | return 4096UL; |
| 1627 | } |
| 1628 | #endif |
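/*
 * Editorial worked example for the !CONFIG_MEMORY_HOTPLUG variant: a
 * 1GB zone with 4K pages spans 262144 pages; divided by
 * PAGES_PER_WAITQUEUE that is 1024, and the doubling loop stops exactly
 * at size == 1024, well under the 4096 cap. A 16GB zone would want
 * 16384 entries and is clamped to 4096.
 */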
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1629 | |
| 1630 | /* |
| 1631 | * This is an integer logarithm so that shifts can be used later |
| 1632 | * to extract the more random high bits from the multiplicative |
| 1633 | * hash function before the remainder is taken. |
| 1634 | */ |
| 1635 | static inline unsigned long wait_table_bits(unsigned long size) |
| 1636 | { |
| 1637 | return ffz(~size); |
| 1638 | } |
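/*
 * Editorial example: the table size is a power of two, so ffz(~size) is
 * its base-2 logarithm -- wait_table_bits(4096) == 12 and
 * wait_table_bits(1024) == 10.
 */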
| 1639 | |
| 1640 | #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) |
| 1641 | |
| 1642 | static void __init calculate_zone_totalpages(struct pglist_data *pgdat, |
| 1643 | unsigned long *zones_size, unsigned long *zholes_size) |
| 1644 | { |
| 1645 | unsigned long realtotalpages, totalpages = 0; |
| 1646 | int i; |
| 1647 | |
| 1648 | for (i = 0; i < MAX_NR_ZONES; i++) |
| 1649 | totalpages += zones_size[i]; |
| 1650 | pgdat->node_spanned_pages = totalpages; |
| 1651 | |
| 1652 | realtotalpages = totalpages; |
| 1653 | if (zholes_size) |
| 1654 | for (i = 0; i < MAX_NR_ZONES; i++) |
| 1655 | realtotalpages -= zholes_size[i]; |
| 1656 | pgdat->node_present_pages = realtotalpages; |
| 1657 | printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); |
| 1658 | } |
| 1659 | |
| 1660 | |
| 1661 | /* |
| 1662 | * Initially all pages are reserved - free ones are freed |
| 1663 | * up by free_all_bootmem() once the early boot process is |
| 1664 | * done. Non-atomic initialization, single-pass. |
| 1665 | */ |
Matt Tolentino | c09b424 | 2006-01-17 07:03:44 +0100 | [diff] [blame] | 1666 | void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | unsigned long start_pfn) |
| 1668 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1669 | struct page *page; |
Andy Whitcroft | 29751f6 | 2005-06-23 00:08:00 -0700 | [diff] [blame] | 1670 | unsigned long end_pfn = start_pfn + size; |
| 1671 | unsigned long pfn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1672 | |
Greg Ungerer | cbe8dd4 | 2006-01-12 01:05:24 -0800 | [diff] [blame] | 1673 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1674 | if (!early_pfn_valid(pfn)) |
| 1675 | continue; |
| 1676 | page = pfn_to_page(pfn); |
| 1677 | set_page_links(page, zone, nid, pfn); |
Nick Piggin | 7835e98 | 2006-03-22 00:08:40 -0800 | [diff] [blame] | 1678 | init_page_count(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1679 | reset_page_mapcount(page); |
| 1680 | SetPageReserved(page); |
| 1681 | INIT_LIST_HEAD(&page->lru); |
| 1682 | #ifdef WANT_PAGE_VIRTUAL |
| 1683 | /* The shift won't overflow because ZONE_NORMAL is below 4G. */ |
| 1684 | if (!is_highmem_idx(zone)) |
Bob Picco | 3212c6b | 2005-06-27 14:36:28 -0700 | [diff] [blame] | 1685 | set_page_address(page, __va(pfn << PAGE_SHIFT)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1686 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 | } |
| 1688 | } |
| 1689 | |
| 1690 | void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, |
| 1691 | unsigned long size) |
| 1692 | { |
| 1693 | int order; |
| 1694 | for (order = 0; order < MAX_ORDER ; order++) { |
| 1695 | INIT_LIST_HEAD(&zone->free_area[order].free_list); |
| 1696 | zone->free_area[order].nr_free = 0; |
| 1697 | } |
| 1698 | } |
| 1699 | |
Andy Whitcroft | d41dee3 | 2005-06-23 00:07:54 -0700 | [diff] [blame] | 1700 | #define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr) |
| 1701 | void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn, |
| 1702 | unsigned long size) |
| 1703 | { |
| 1704 | unsigned long snum = pfn_to_section_nr(pfn); |
| 1705 | unsigned long end = pfn_to_section_nr(pfn + size); |
| 1706 | |
| 1707 | if (FLAGS_HAS_NODE) |
| 1708 | zone_table[ZONETABLE_INDEX(nid, zid)] = zone; |
| 1709 | else |
| 1710 | for (; snum <= end; snum++) |
| 1711 | zone_table[ZONETABLE_INDEX(snum, zid)] = zone; |
| 1712 | } |
| 1713 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1714 | #ifndef __HAVE_ARCH_MEMMAP_INIT |
| 1715 | #define memmap_init(size, nid, zone, start_pfn) \ |
| 1716 | memmap_init_zone((size), (nid), (zone), (start_pfn)) |
| 1717 | #endif |
| 1718 | |
Ashok Raj | 6292d9a | 2006-02-01 03:04:44 -0800 | [diff] [blame] | 1719 | static int __cpuinit zone_batchsize(struct zone *zone) |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1720 | { |
| 1721 | int batch; |
| 1722 | |
| 1723 | /* |
| 1724 | * The per-cpu-pages pools are set to around 1/1000th of the |
Seth, Rohit | ba56e91 | 2005-10-29 18:15:47 -0700 | [diff] [blame] | 1725 | * size of the zone. But no more than 1/2 of a meg. |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1726 | * |
| 1727 | * OK, so we don't know how big the cache is. So guess. |
| 1728 | */ |
| 1729 | batch = zone->present_pages / 1024; |
Seth, Rohit | ba56e91 | 2005-10-29 18:15:47 -0700 | [diff] [blame] | 1730 | if (batch * PAGE_SIZE > 512 * 1024) |
| 1731 | batch = (512 * 1024) / PAGE_SIZE; |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1732 | batch /= 4; /* We effectively *= 4 below */ |
| 1733 | if (batch < 1) |
| 1734 | batch = 1; |
| 1735 | |
| 1736 | /* |
Nick Piggin | 0ceaacc | 2005-12-04 13:55:25 +1100 | [diff] [blame] | 1737 | * Clamp the batch to a 2^n - 1 value. Having a power |
| 1738 | * of 2 value was found to be more likely to have |
| 1739 | * suboptimal cache aliasing properties in some cases. |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1740 | * |
Nick Piggin | 0ceaacc | 2005-12-04 13:55:25 +1100 | [diff] [blame] | 1741 | * For example if 2 tasks are alternately allocating |
| 1742 | * batches of pages, one task can end up with a lot |
| 1743 | * of pages of one half of the possible page colors |
| 1744 | * and the other with pages of the other colors. |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1745 | */ |
Nick Piggin | 0ceaacc | 2005-12-04 13:55:25 +1100 | [diff] [blame] | 1746 | batch = (1 << (fls(batch + batch/2)-1)) - 1; |
Seth, Rohit | ba56e91 | 2005-10-29 18:15:47 -0700 | [diff] [blame] | 1747 | |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1748 | return batch; |
| 1749 | } |
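/*
 * Editorial worked example with 4K pages: a zone of 262144 present
 * pages starts with batch = 262144 / 1024 == 256; 256 pages is 1MB,
 * over the 512KB cap, so batch becomes 128; dividing by 4 leaves 32;
 * and the final rounding yields (1 << (fls(32 + 16) - 1)) - 1 == 31,
 * the nearest 2^n - 1 value.
 */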
| 1750 | |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1751 | inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) |
| 1752 | { |
| 1753 | struct per_cpu_pages *pcp; |
| 1754 | |
Magnus Damm | 1c6fe94 | 2005-10-26 01:58:59 -0700 | [diff] [blame] | 1755 | memset(p, 0, sizeof(*p)); |
| 1756 | |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1757 | pcp = &p->pcp[0]; /* hot */ |
| 1758 | pcp->count = 0; |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1759 | pcp->high = 6 * batch; |
| 1760 | pcp->batch = max(1UL, 1 * batch); |
| 1761 | INIT_LIST_HEAD(&pcp->list); |
| 1762 | |
| 1763 | pcp = &p->pcp[1]; /* cold */ |
| 1764 | pcp->count = 0; |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1765 | pcp->high = 2 * batch; |
Seth, Rohit | e46a5e2 | 2005-10-29 18:15:48 -0700 | [diff] [blame] | 1766 | pcp->batch = max(1UL, batch/2); |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1767 | INIT_LIST_HEAD(&pcp->list); |
| 1768 | } |
| 1769 | |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 1770 | /* |
| 1771 | * setup_pagelist_highmark() sets the high water mark of the hot |
| 1772 | * per-cpu pagelist in pageset p to the value high. |
| 1773 | */ |
| 1774 | |
| 1775 | static void setup_pagelist_highmark(struct per_cpu_pageset *p, |
| 1776 | unsigned long high) |
| 1777 | { |
| 1778 | struct per_cpu_pages *pcp; |
| 1779 | |
| 1780 | pcp = &p->pcp[0]; /* hot list */ |
| 1781 | pcp->high = high; |
| 1782 | pcp->batch = max(1UL, high/4); |
| 1783 | if ((high/4) > (PAGE_SHIFT * 8)) |
| 1784 | pcp->batch = PAGE_SHIFT * 8; |
| 1785 | } |
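/*
 * Editorial worked example: with percpu_pagelist_fraction == 8 and a
 * zone of 262144 present pages, the caller passes high == 32768;
 * high/4 == 8192 exceeds PAGE_SHIFT * 8 (96 with 4K pages), so the
 * batch is clamped to 96 while the high water mark stays at 32768.
 */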
| 1786 | |
| 1787 | |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1788 | #ifdef CONFIG_NUMA |
| 1789 | /* |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1790 | * Boot pageset table. One per cpu which is going to be used for all |
| 1791 | * zones and all nodes. The parameters will be set in such a way |
| 1792 | * that an item put on a list will immediately be handed over to |
| 1793 | * the buddy list. This is safe since pageset manipulation is done |
| 1794 | * with interrupts disabled. |
| 1795 | * |
| 1796 | * Some NUMA counter updates may also be caught by the boot pagesets. |
Christoph Lameter | b7c84c6 | 2005-06-22 20:26:07 -0700 | [diff] [blame] | 1797 | * |
| 1798 | * The boot_pagesets must be kept even after bootup is complete for |
| 1799 | * unused processors and/or zones. They do play a role for bootstrapping |
| 1800 | * hotplugged processors. |
| 1801 | * |
| 1802 | * zoneinfo_show() and maybe other functions do |
| 1803 | * not check if the processor is online before following the pageset pointer. |
| 1804 | * Other parts of the kernel may not check if the zone is available. |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1805 | */ |
Eric Dumazet | 88a2a4ac | 2006-02-04 23:27:36 -0800 | [diff] [blame] | 1806 | static struct per_cpu_pageset boot_pageset[NR_CPUS]; |
Christoph Lameter | 2caaad4 | 2005-06-21 17:15:00 -0700 | [diff] [blame] | 1807 | |
| 1808 | /* |
| 1809 | * Dynamically allocate memory for the |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1810 | * per cpu pageset array in struct zone. |
| 1811 | */ |
Ashok Raj | 6292d9a | 2006-02-01 03:04:44 -0800 | [diff] [blame] | 1812 | static int __cpuinit process_zones(int cpu) |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1813 | { |
| 1814 | struct zone *zone, *dzone; |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1815 | |
| 1816 | for_each_zone(zone) { |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1817 | |
Nick Piggin | 23316bc | 2006-01-08 01:00:41 -0800 | [diff] [blame] | 1818 | zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1819 | GFP_KERNEL, cpu_to_node(cpu)); |
Nick Piggin | 23316bc | 2006-01-08 01:00:41 -0800 | [diff] [blame] | 1820 | if (!zone_pcp(zone, cpu)) |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1821 | goto bad; |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1822 | |
Nick Piggin | 23316bc | 2006-01-08 01:00:41 -0800 | [diff] [blame] | 1823 | setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); |
Rohit Seth | 8ad4b1f | 2006-01-08 01:00:40 -0800 | [diff] [blame] | 1824 | |
| 1825 | if (percpu_pagelist_fraction) |
| 1826 | setup_pagelist_highmark(zone_pcp(zone, cpu), |
| 1827 | (zone->present_pages / percpu_pagelist_fraction)); |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1828 | } |
| 1829 | |
| 1830 | return 0; |
| 1831 | bad: |
| 1832 | for_each_zone(dzone) { |
| 1833 | if (dzone == zone) |
| 1834 | break; |
Nick Piggin | 23316bc | 2006-01-08 01:00:41 -0800 | [diff] [blame] | 1835 | kfree(zone_pcp(dzone, cpu)); |
| 1836 | zone_pcp(dzone, cpu) = NULL; |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1837 | } |
| 1838 | return -ENOMEM; |
| 1839 | } |
| 1840 | |
| 1841 | static inline void free_zone_pagesets(int cpu) |
| 1842 | { |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1843 | struct zone *zone; |
| 1844 | |
| 1845 | for_each_zone(zone) { |
| 1846 | struct per_cpu_pageset *pset = zone_pcp(zone, cpu); |
| 1847 | |
David Rientjes | f3ef9ea | 2006-09-25 16:24:57 -0700 | [diff] [blame^] | 1848 | /* Free per_cpu_pageset if it is slab allocated */ |
| 1849 | if (pset != &boot_pageset[cpu]) |
| 1850 | kfree(pset); |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1851 | zone_pcp(zone, cpu) = NULL; |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1852 | } |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1853 | } |
| 1854 | |
Chandra Seetharaman | 9c7b216 | 2006-06-27 02:54:07 -0700 | [diff] [blame] | 1855 | static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1856 | unsigned long action, |
| 1857 | void *hcpu) |
| 1858 | { |
| 1859 | int cpu = (long)hcpu; |
| 1860 | int ret = NOTIFY_OK; |
| 1861 | |
| 1862 | switch (action) { |
| 1863 | case CPU_UP_PREPARE: |
| 1864 | if (process_zones(cpu)) |
| 1865 | ret = NOTIFY_BAD; |
| 1866 | break; |
Andi Kleen | b0d4169 | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 1867 | case CPU_UP_CANCELED: |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1868 | case CPU_DEAD: |
| 1869 | free_zone_pagesets(cpu); |
| 1870 | break; |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1871 | default: |
| 1872 | break; |
| 1873 | } |
| 1874 | return ret; |
| 1875 | } |
| 1876 | |
Chandra Seetharaman | 74b85f3 | 2006-06-27 02:54:09 -0700 | [diff] [blame] | 1877 | static struct notifier_block __cpuinitdata pageset_notifier = |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1878 | { &pageset_cpuup_callback, NULL, 0 }; |
| 1879 | |
Al Viro | 78d9955 | 2005-12-15 09:18:25 +0000 | [diff] [blame] | 1880 | void __init setup_per_cpu_pageset(void) |
Christoph Lameter | e7c8d5c | 2005-06-21 17:14:47 -0700 | [diff] [blame] | 1881 | { |
| 1882 | int err; |
| 1883 | |
| 1884 | /* Initialize per_cpu_pageset for cpu 0. |
| 1885 | * A cpuup callback will do this for every cpu |
| 1886 | * as it comes online |
| 1887 | */ |
| 1888 | err = process_zones(smp_processor_id()); |
| 1889 | BUG_ON(err); |
| 1890 | register_cpu_notifier(&pageset_notifier); |
| 1891 | } |
| 1892 | |
| 1893 | #endif |
| 1894 | |
Matt Tolentino | c09b424 | 2006-01-17 07:03:44 +0100 | [diff] [blame] | 1895 | static __meminit |
Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1896 | int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1897 | { |
| 1898 | int i; |
| 1899 | struct pglist_data *pgdat = zone->zone_pgdat; |
Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1900 | size_t alloc_size; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1901 | |
| 1902 | /* |
| 1903 | * The per-page waitqueue mechanism uses hashed waitqueues |
| 1904 | * per zone. |
| 1905 | */ |
Yasunori Goto | 02b694d | 2006-06-23 02:03:08 -0700 | [diff] [blame] | 1906 | zone->wait_table_hash_nr_entries = |
| 1907 | wait_table_hash_nr_entries(zone_size_pages); |
| 1908 | zone->wait_table_bits = |
| 1909 | wait_table_bits(zone->wait_table_hash_nr_entries); |
Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1910 | alloc_size = zone->wait_table_hash_nr_entries |
| 1911 | * sizeof(wait_queue_head_t); |
| 1912 | |
| 1913 | if (system_state == SYSTEM_BOOTING) { |
| 1914 | zone->wait_table = (wait_queue_head_t *) |
| 1915 | alloc_bootmem_node(pgdat, alloc_size); |
| 1916 | } else { |
| 1917 | /* |
| 1918 | * This case means that a zone whose size was 0 gets new memory |
| 1919 | * via memory hot-add. |
| 1920 | * It may also be that a whole new node was hot-added. In that |
| 1921 | * case vmalloc() cannot yet allocate from the new node's memory, |
| 1922 | * so this wait_table cannot be placed on the node it serves, |
| 1923 | * even though it ideally should be. |
| 1924 | * Making the table use the new node's own memory will need |
| 1925 | * further work. |
| 1926 | */ |
| 1927 | zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); |
| 1928 | } |
| 1929 | if (!zone->wait_table) |
| 1930 | return -ENOMEM; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1931 | |
Yasunori Goto | 02b694d | 2006-06-23 02:03:08 -0700 | [diff] [blame] | 1932 | for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1933 | init_waitqueue_head(zone->wait_table + i); |
Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1934 | |
| 1935 | return 0; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1936 | } |
| 1937 | |
Matt Tolentino | c09b424 | 2006-01-17 07:03:44 +0100 | [diff] [blame] | 1938 | static __meminit void zone_pcp_init(struct zone *zone) |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1939 | { |
| 1940 | int cpu; |
| 1941 | unsigned long batch = zone_batchsize(zone); |
| 1942 | |
| 1943 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
| 1944 | #ifdef CONFIG_NUMA |
| 1945 | /* Early boot. Slab allocator not functional yet */ |
Nick Piggin | 23316bc | 2006-01-08 01:00:41 -0800 | [diff] [blame] | 1946 | zone_pcp(zone, cpu) = &boot_pageset[cpu]; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1947 | setup_pageset(&boot_pageset[cpu],0); |
| 1948 | #else |
| 1949 | setup_pageset(zone_pcp(zone,cpu), batch); |
| 1950 | #endif |
| 1951 | } |
Anton Blanchard | f5335c0 | 2006-03-25 03:06:49 -0800 | [diff] [blame] | 1952 | if (zone->present_pages) |
| 1953 | printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", |
| 1954 | zone->name, zone->present_pages, batch); |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1955 | } |
| 1956 | |
Yasunori Goto | 718127c | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1957 | __meminit int init_currently_empty_zone(struct zone *zone, |
| 1958 | unsigned long zone_start_pfn, |
| 1959 | unsigned long size) |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1960 | { |
| 1961 | struct pglist_data *pgdat = zone->zone_pgdat; |
Yasunori Goto | cca448f | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1962 | int ret; |
| 1963 | ret = zone_wait_table_init(zone, size); |
| 1964 | if (ret) |
| 1965 | return ret; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1966 | pgdat->nr_zones = zone_idx(zone) + 1; |
| 1967 | |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1968 | zone->zone_start_pfn = zone_start_pfn; |
| 1969 | |
| 1970 | memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); |
| 1971 | |
| 1972 | zone_init_free_lists(pgdat, zone, zone->spanned_pages); |
Yasunori Goto | 718127c | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1973 | |
| 1974 | return 0; |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1975 | } |
| 1976 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1977 | /* |
| 1978 | * Set up the zone data structures: |
| 1979 | * - mark all pages reserved |
| 1980 | * - mark all memory queues empty |
| 1981 | * - clear the memory bitmaps |
| 1982 | */ |
Yasunori Goto | 86356ab | 2006-06-23 02:03:09 -0700 | [diff] [blame] | 1983 | static void __meminit free_area_init_core(struct pglist_data *pgdat, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1984 | unsigned long *zones_size, unsigned long *zholes_size) |
| 1985 | { |
Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1986 | unsigned long j; |
| 1987 | int nid = pgdat->node_id; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1988 | unsigned long zone_start_pfn = pgdat->node_start_pfn; |
Yasunori Goto | 718127c | 2006-06-23 02:03:10 -0700 | [diff] [blame] | 1989 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 | |
Dave Hansen | 208d54e | 2005-10-29 18:16:52 -0700 | [diff] [blame] | 1991 | pgdat_resize_init(pgdat); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1992 | pgdat->nr_zones = 0; |
| 1993 | init_waitqueue_head(&pgdat->kswapd_wait); |
| 1994 | pgdat->kswapd_max_order = 0; |
| 1995 | |
| 1996 | for (j = 0; j < MAX_NR_ZONES; j++) { |
| 1997 | struct zone *zone = pgdat->node_zones + j; |
| 1998 | unsigned long size, realsize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2000 | realsize = size = zones_size[j]; |
| 2001 | if (zholes_size) |
| 2002 | realsize -= zholes_size[j]; |
| 2003 | |
Andi Kleen | a2f1b42 | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 2004 | if (j < ZONE_HIGHMEM) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2005 | nr_kernel_pages += realsize; |
| 2006 | nr_all_pages += realsize; |
| 2007 | |
| 2008 | zone->spanned_pages = size; |
| 2009 | zone->present_pages = realsize; |
Christoph Lameter | 9614634 | 2006-07-03 00:24:13 -0700 | [diff] [blame] | 2010 | #ifdef CONFIG_NUMA |
| 2011 | zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio) |
| 2012 | / 100; |
| 2013 | #endif |
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;
		zone->free_pages = 0;

		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;

		zone_pcp_init(zone);
		INIT_LIST_HEAD(&zone->active_list);
		INIT_LIST_HEAD(&zone->inactive_list);
		zone->nr_scan_active = 0;
		zone->nr_scan_inactive = 0;
		zone->nr_active = 0;
		zone->nr_inactive = 0;
		zap_zone_vm_stats(zone);
		atomic_set(&zone->reclaim_in_progress, 0);
		if (!size)
			continue;

		zonetable_add(zone, nid, j, zone_start_pfn, size);
		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
		BUG_ON(ret);
		zone_start_pfn += size;
	}
}

static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned, but the node_mem_map endpoints must be, in
		 * order for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
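		/*
		 * Worked example (illustrative, assumed numbers): with
		 * MAX_ORDER_NR_PAGES = 1024 and node_start_pfn = 0x1234,
		 * start rounds down to 0x1000; an end pfn of 0x5678 rounds
		 * up to 0x5800, so struct pages cover the full aligned
		 * span even though the node does not use its edges.
		 */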
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
	}
#ifdef CONFIG_FLATMEM
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0))
		mem_map = NODE_DATA(0)->node_mem_map;
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_zone_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

	free_area_init_core(pgdat, zones_size, zholes_size);
}
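
/*
 * Illustrative sketch, not from the original file: an architecture's
 * paging_init() would typically drive the function above along these
 * lines (the names and sizes here are hypothetical):
 *
 *	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
 *
 *	zones_size[ZONE_DMA] = dma_end_pfn - start_pfn;
 *	zones_size[ZONE_NORMAL] = max_low_pfn - dma_end_pfn;
 *	free_area_init_node(0, NODE_DATA(0), zones_size, start_pfn, NULL);
 */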

#ifndef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t contig_bootmem_data;
struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };

EXPORT_SYMBOL(contig_page_data);
#endif

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, NODE_DATA(0), zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU
static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		local_irq_disable();
		__drain_pages(cpu);
		vm_events_fold_cpu(cpu);
		local_irq_enable();
		refresh_cpu_vm_stats(cpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}
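
/*
 * Illustrative note, not from the original file: when CONFIG_HOTPLUG_CPU
 * is not set, hotcpu_notifier() is believed to compile away to a no-op,
 * which is why page_alloc_cpu_notify() above sits under the same #ifdef.
 */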

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	int i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			unsigned long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat pages_high as reserved pages. */
			max += zone->pages_high;

			if (max > zone->present_pages)
				max = zone->present_pages;
			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}
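
/*
 * Worked example (illustrative, assumed numbers): a zone with
 * lowmem_reserve[] = { 0, 880, 3520 } and pages_high = 96 contributes
 * max(lowmem_reserve) + pages_high = 3520 + 96 = 3616 pages; summing
 * this over every zone of every node yields totalreserve_pages.
 */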

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	int j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			for (idx = j-1; idx >= 0; idx--) {
				struct zone *lower_zone;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
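
/*
 * Worked example (illustrative, assumed numbers): with a 225280-page
 * ZONE_NORMAL above a ZONE_DMA whose ratio is 256, the DMA zone gets
 * lowmem_reserve[ZONE_NORMAL] = 225280 / 256 = 880 pages -- pages that
 * an allocation targeted at ZONE_NORMAL must leave free in ZONE_DMA
 * before it may fall back to it.
 */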

/*
 * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
 *	that the pages_{min,low,high} values for each zone are set correctly
 *	with respect to min_free_kbytes.
 */
void setup_per_zone_pages_min(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->present_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lru_lock, flags);
		tmp = (u64)pages_min * zone->present_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The (pages_high-pages_low) and (pages_low-pages_min)
			 * deltas control asynchronous page reclaim, and so
			 * should not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->pages_min = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->pages_min = tmp;
		}

		zone->pages_low = zone->pages_min + (tmp >> 2);
		zone->pages_high = zone->pages_min + (tmp >> 1);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
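
/*
 * Worked example (illustrative, assumed numbers): with 4K pages and
 * min_free_kbytes = 4096, pages_min totals 4096 >> 2 = 1024 pages.  A
 * lowmem zone holding half of all lowmem gets tmp = 512, hence
 * pages_min = 512, pages_low = 512 + 512/4 = 640 and
 * pages_high = 512 + 512/2 = 768.
 */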

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_pages_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_pages_min();
	setup_per_zone_lowmem_reserve();
	return 0;
}
module_init(init_per_zone_pages_min)
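
/*
 * Sanity check of one row of the table above (illustrative): 1024MB of
 * lowmem is 1048576 kB, 1048576 * 16 = 16777216, and
 * int_sqrt(16777216) = 4096, i.e. min_free_kbytes = 4096k as tabulated.
 */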

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call setup_per_zone_pages_min() whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	setup_per_zone_pages_min();
	return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_ratio = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the pages_min watermarks; it is
 * only meaningful relative to the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes pcp->high for each zone on each cpu.
 *	The sysctl value is a divisor: each hot per-cpu pagelist may hold at
 *	most zone->present_pages / percpu_pagelist_fraction pages before it
 *	is flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_zone(zone) {
		for_each_online_cpu(cpu) {
			unsigned long high;
			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
		}
	}
	return 0;
}
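
/*
 * Worked example (illustrative, assumed numbers): with
 * percpu_pagelist_fraction = 8 on a 262144-page zone, each hot per-cpu
 * list is capped at 262144 / 8 = 32768 pages.  The ctl_table entry is
 * assumed to enforce a positive minimum for the sysctl, which keeps the
 * division above safe.
 */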

int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);
	}
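
	/*
	 * Worked example (illustrative, assumed numbers): with 4K pages,
	 * nr_kernel_pages = 262144 (1GB of lowmem) and scale = 14, the
	 * megabyte rounding leaves numentries = 262144, and since scale >
	 * PAGE_SHIFT it is shifted down by 2 to 65536 buckets -- one
	 * bucket per 16KB of low memory.
	 */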
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = long_log2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			unsigned long order;
			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
				;
			table = (void *) __get_free_pages(GFP_ATOMIC, order);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
	       tablename,
	       (1U << log2qty),
	       long_log2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}

#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
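/*
 * Illustrative note, not from the original file: these out-of-line
 * definitions exist because __pfn_to_page()/__page_to_pfn() can expand
 * to nontrivial memory-model arithmetic (e.g. sparsemem section
 * lookups), and real functions keep the many call sites small.
 */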
struct page *pfn_to_page(unsigned long pfn)
{
	return __pfn_to_page(pfn);
}
unsigned long page_to_pfn(struct page *page)
{
	return __page_to_pfn(page);
}
EXPORT_SYMBOL(pfn_to_page);
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */