/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
struct pglist_data *pgdat_list;
unsigned long totalram_pages;
unsigned long totalhigh_pages;
long nr_swap_pages;

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };

EXPORT_SYMBOL(totalram_pages);
EXPORT_SYMBOL(nr_swap_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
int min_free_kbytes = 1024;

unsigned long __initdata nr_kernel_pages;
unsigned long __initdata nr_all_pages;

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
		return 1;
	if (page_to_pfn(page) < zone->zone_start_pfn)
		return 1;
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 1;
#endif
	if (zone != page_zone(page))
		return 1;
	return 0;
}

static void bad_page(const char *function, struct page *page)
{
	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
		function, current->comm, page);
	printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
		(int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
		page->mapping, page_mapcount(page), page_count(page));
	printk(KERN_EMERG "Backtrace:\n");
	dump_stack();
	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
	page->flags &= ~(1 << PG_private |
			1 << PG_locked	|
			1 << PG_lru	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_swapcache |
			1 << PG_writeback);
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	tainted |= TAINT_BAD_PAGE;
}

#ifndef CONFIG_HUGETLB_PAGE
#define prep_compound_page(page, order) do { } while (0)
#define destroy_compound_page(page, order) do { } while (0)
#else
/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->mapping, if non-zero, holds the address of the
 * compound page's put_page() function.
 *
 * The order of the allocation is stored in the first tail page's ->index.
 * This is only for debug at present.  This usage means that zero-order pages
 * may not be compound.
 */
static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].mapping = NULL;
	page[1].index = order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		SetPageCompound(p);
		p->private = (unsigned long)page;
	}
}
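
/*
 * Illustrative note (not from the original source): for an order-2
 * compound allocation starting at page P, prep_compound_page() leaves
 * the four struct pages as
 *
 *	P[0]: head page, PG_compound set, ->private == P
 *	P[1]: tail page, PG_compound set, ->private == P, ->index == 2
 *	P[2]: tail page, PG_compound set, ->private == P
 *	P[3]: tail page, PG_compound set, ->private == P
 *
 * destroy_compound_page() below checks exactly this layout.
 */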

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (!PageCompound(page))
		return;

	if (page[1].index != order)
		bad_page(__FUNCTION__, page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (!PageCompound(p))
			bad_page(__FUNCTION__, page);
		if (p->private != (unsigned long)page)
			bad_page(__FUNCTION__, page);
		ClearPageCompound(p);
	}
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page) {
	return page->private;
}

static inline void set_page_order(struct page *page, int order) {
	page->private = order;
	__SetPagePrivate(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPagePrivate(page);
	page->private = 0;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
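
/*
 * Worked example (illustrative, not in the original source): coalescing
 * the order-1 buddy pair at indices 8 and 10 gives
 * __find_combined_index(8, 1) == __find_combined_index(10, 1) == 8,
 * the start of the resulting order-2 block.
 */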

/*
 * This function checks whether a page is free and is the buddy
 * we can coalesce with: a page and its buddy can be coalesced if
 * (a) the buddy is free &&
 * (b) the buddy is on the buddy system &&
 * (c) a page and its buddy have the same order.
 * For recording a page's order, we use page->private and PG_private.
 */
static inline int page_is_buddy(struct page *page, int order)
{
	if (PagePrivate(page)		&&
	    (page_order(page) == order) &&
	    !PageReserved(page)		&&
	    page_count(page) == 0)
		return 1;
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages that are the heads of contiguous
 * free blocks of length (1 << order), marked with PG_private. A page's
 * order is recorded in its page->private field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_pages_bulk (struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(order))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	BUG_ON(page_idx & (order_size - 1));
	BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		combined_idx = __find_combined_index(page_idx, order);
		buddy = __page_find_buddy(page, page_idx, order);

		if (bad_range(zone, buddy))
			break;
		if (!page_is_buddy(buddy, order))
			break;		/* Move the buddy up one level. */
		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}
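
/*
 * Illustrative walk-through (not part of the original source): freeing
 * order-0 page 10 while page 11 is already free at order 0 removes
 * page 11 from free_area[0], merges the pair into an order-1 block at
 * index 10, then retries the merge at order 1 against index 8, and so
 * on up to MAX_ORDER-1 or until a buddy is found not to be free.
 */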

static inline void free_pages_check(const char *function, struct page *page)
{
	if (	page_mapcount(page) ||
		page->mapping != NULL ||
		page_count(page) != 0 ||
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback )))
		bad_page(function, page);
	if (PageDirty(page))
		ClearPageDirty(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free, or 0 for all on the list.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static int
free_pages_bulk(struct zone *zone, int count,
		struct list_head *list, unsigned int order)
{
	unsigned long flags;
	struct page *page = NULL;
	int ret = 0;

	spin_lock_irqsave(&zone->lock, flags);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (!list_empty(list) && count--) {
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_pages_bulk manipulates the list */
		list_del(&page->lru);
		__free_pages_bulk(page, zone, order);
		ret++;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}

void __free_pages_ok(struct page *page, unsigned int order)
{
	LIST_HEAD(list);
	int i;

	arch_free_page(page, order);

	mod_page_state(pgfree, 1 << order);

#ifndef CONFIG_MMU
	if (order > 0)
		for (i = 1 ; i < (1 << order) ; ++i)
			__put_page(page + i);
#endif

	for (i = 0 ; i < (1 << order) ; ++i)
		free_pages_check(__FUNCTION__, page + i);
	list_add(&page->lru, &list);
	kernel_map_pages(page, 1<<order, 0);
	free_pages_bulk(page_zone(page), 1, &list, order);
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline struct page *
expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
	return page;
}
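
/*
 * Example (illustrative, not in the original source): satisfying an
 * order-0 request from an order-3 block of 8 pages splits off the upper
 * halves step by step: an order-2 block (pages 4-7), an order-1 block
 * (pages 2-3) and an order-0 page (page 1) go back on their free lists,
 * and page 0 is returned to the caller.
 */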

void set_page_refs(struct page *page, int order)
{
#ifdef CONFIG_MMU
	set_page_count(page, 1);
#else
	int i;

	/*
	 * We need to reference all the pages for this order, otherwise if
	 * anyone accesses one of the pages with (get/put) it will be freed.
	 * - eg: access_process_vm()
	 */
	for (i = 0; i < (1 << order); i++)
		set_page_count(page + i, 1);
#endif /* CONFIG_MMU */
}

/*
 * This page is about to be returned from the page allocator
 */
static void prep_new_page(struct page *page, int order)
{
	if (page->mapping || page_mapcount(page) ||
	    (page->flags & (
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_lru	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_swapcache |
			1 << PG_writeback )))
		bad_page(__FUNCTION__, page);

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	page->private = 0;
	set_page_refs(page, order);
	kernel_map_pages(page, 1 << order, 1);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		return expand(zone, page, order, current_order, area);
	}

	return NULL;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	unsigned long flags;
	int i;
	int allocated = 0;
	struct page *page;

	spin_lock_irqsave(&zone->lock, flags);
	for (i = 0; i < count; ++i) {
		page = __rmqueue(zone, order);
		if (page == NULL)
			break;
		allocated++;
		list_add_tail(&page->lru, list);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return allocated;
}

#ifdef CONFIG_NUMA
/* Called from the slab reaper to drain remote pagesets */
void drain_remote_pages(void)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	local_irq_save(flags);
	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		/* Do not drain local pagesets */
		if (zone->zone_pgdat->node_id == numa_node_id())
			continue;

		pset = zone->pageset[smp_processor_id()];
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count)
				pcp->count -= free_pages_bulk(zone, pcp->count,
						&pcp->list, 0);
		}
	}
	local_irq_restore(flags);
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			pcp->count -= free_pages_bulk(zone, pcp->count,
						&pcp->list, 0);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i=0; i < (1<<order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn+i));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);
}
#endif /* CONFIG_PM */

static void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
#ifdef CONFIG_NUMA
	unsigned long flags;
	int cpu;
	pg_data_t *pg = z->zone_pgdat;
	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
	struct per_cpu_pageset *p;

	local_irq_save(flags);
	cpu = smp_processor_id();
	p = zone_pcp(z,cpu);
	if (pg == orig) {
		p->numa_hit++;
	} else {
		p->numa_miss++;
		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
	}
	if (pg == NODE_DATA(numa_node_id()))
		p->local_node++;
	else
		p->other_node++;
	local_irq_restore(flags);
#endif
}

/*
 * Free a 0-order page
 */
static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	kernel_map_pages(page, 1, 0);
	inc_page_state(pgfree);
	if (PageAnon(page))
		page->mapping = NULL;
	free_pages_check(__FUNCTION__, page);
	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high)
		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
{
	int i;

	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	for(i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *
buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
{
	unsigned long flags;
	struct page *page = NULL;
	int cold = !!(gfp_flags & __GFP_COLD);

	if (order == 0) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
		local_irq_save(flags);
		if (pcp->count <= pcp->low)
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
		if (pcp->count) {
			page = list_entry(pcp->list.next, struct page, lru);
			list_del(&page->lru);
			pcp->count--;
		}
		local_irq_restore(flags);
		put_cpu();
	}

	if (page == NULL) {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	if (page != NULL) {
		BUG_ON(bad_range(zone, page));
		mod_page_state_zone(zone, pgalloc, 1 << order);
		prep_new_page(page, order);

		if (gfp_flags & __GFP_ZERO)
			prep_zero_page(page, order, gfp_flags);

		if (order && (gfp_flags & __GFP_COMP))
			prep_compound_page(page, order);
	}
	return page;
}

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int can_try_harder, int gfp_high)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (gfp_high)
		min -= min / 2;
	if (can_try_harder)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
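
/*
 * Worked example (illustrative, made-up numbers): for an order-2 request
 * with mark == 128, no __GFP_HIGH and no "try harder", the zone must have
 * more than roughly 128 + lowmem_reserve free pages overall, more than 64
 * free pages sitting in blocks of order >= 1, and more than 32 free pages
 * in blocks of order >= 2 for the check above to pass.
 */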

static inline int
should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
{
	if (!z->reclaim_pages)
		return 0;
	if (gfp_mask & __GFP_NORECLAIM)
		return 0;
	return 1;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const int wait = gfp_mask & __GFP_WAIT;
	struct zone **zones, *z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int i;
	int classzone_idx;
	int do_retry;
	int can_try_harder;
	int did_some_progress;

	might_sleep_if(wait);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy
	 */
	can_try_harder = (unlikely(rt_task(p)) && !in_interrupt()) || !wait;

	zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(zones[0] == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	classzone_idx = zone_idx(zones[0]);

restart:
	/* Go through the zonelist once, looking for a zone with enough free */
	for (i = 0; (z = zones[i]) != NULL; i++) {
		int do_reclaim = should_reclaim_zone(z, gfp_mask);

		if (!cpuset_zone_allowed(z))
			continue;

		/*
		 * If the zone is to attempt early page reclaim then this loop
		 * will try to reclaim pages and check the watermark a second
		 * time before giving up and falling back to the next zone.
		 */
zone_reclaim_retry:
		if (!zone_watermark_ok(z, order, z->pages_low,
				       classzone_idx, 0, 0)) {
			if (!do_reclaim)
				continue;
			else {
				zone_reclaim(z, gfp_mask, order);
				/* Only try reclaim once */
				do_reclaim = 0;
				goto zone_reclaim_retry;
			}
		}

		page = buffered_rmqueue(z, order, gfp_mask);
		if (page)
			goto got_pg;
	}

	for (i = 0; (z = zones[i]) != NULL; i++)
		wakeup_kswapd(z, order);

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 */
	for (i = 0; (z = zones[i]) != NULL; i++) {
		if (!zone_watermark_ok(z, order, z->pages_min,
				       classzone_idx, can_try_harder,
				       gfp_mask & __GFP_HIGH))
			continue;

		if (wait && !cpuset_zone_allowed(z))
			continue;

		page = buffered_rmqueue(z, order, gfp_mask);
		if (page)
			goto got_pg;
	}

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
			/* go through the zonelist yet again, ignoring mins */
			for (i = 0; (z = zones[i]) != NULL; i++) {
				if (!cpuset_zone_allowed(z))
					continue;
				page = buffered_rmqueue(z, order, gfp_mask);
				if (page)
					goto got_pg;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		for (i = 0; (z = zones[i]) != NULL; i++) {
			if (!zone_watermark_ok(z, order, z->pages_min,
					       classzone_idx, can_try_harder,
					       gfp_mask & __GFP_HIGH))
				continue;

			if (!cpuset_zone_allowed(z))
				continue;

			page = buffered_rmqueue(z, order, gfp_mask);
			if (page)
				goto got_pg;
		}
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		for (i = 0; (z = zones[i]) != NULL; i++) {
			if (!zone_watermark_ok(z, order, z->pages_high,
					       classzone_idx, 0, 0))
				continue;

			if (!cpuset_zone_allowed(z))
				continue;

			page = buffered_rmqueue(z, order, gfp_mask);
			if (page)
				goto got_pg;
		}

		out_of_memory(gfp_mask);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		blk_congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
	return NULL;
got_pg:
	zone_statistics(zonelist, z);
	return page;
}

EXPORT_SYMBOL(__alloc_pages);

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
{
	struct page * page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	BUG_ON(gfp_mask & __GFP_HIGHMEM);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (!PageReserved(page) && put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int i, sum = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	pg_data_t *pgdat;
	unsigned int sum = 0;

	for_each_pgdat(pgdat) {
		struct zonelist *zonelist = pgdat->node_zonelists + offset;
		struct zone **zonep = zonelist->zones;
		struct zone *zone;

		for (zone = *zonep++; zone; zone = *zonep++) {
			unsigned long size = zone->present_pages;
			unsigned long high = zone->pages_high;
			if (size > high)
				sum += size - high;
		}
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
}

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
}

#ifdef CONFIG_HIGHMEM
unsigned int nr_free_highpages (void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_pgdat(pgdat)
		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;

	return pages;
}
#endif

#ifdef CONFIG_NUMA
static void show_node(struct zone *zone)
{
	printk("Node %d ", zone->zone_pgdat->node_id);
}
#else
#define show_node(zone)	do { } while (0)
#endif

/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
static DEFINE_PER_CPU(struct page_state, page_states) = {0};

atomic_t nr_pagecache = ATOMIC_INIT(0);
EXPORT_SYMBOL(nr_pagecache);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif

void __get_page_state(struct page_state *ret, int nr)
{
	int cpu = 0;

	memset(ret, 0, sizeof(*ret));

	cpu = first_cpu(cpu_online_map);
	while (cpu < NR_CPUS) {
		unsigned long *in, *out, off;

		in = (unsigned long *)&per_cpu(page_states, cpu);

		cpu = next_cpu(cpu, cpu_online_map);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(page_states, cpu));

		out = (unsigned long *)ret;
		for (off = 0; off < nr; off++)
			*out++ += *in++;
	}
}

void get_page_state(struct page_state *ret)
{
	int nr;

	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
	nr /= sizeof(unsigned long);

	__get_page_state(ret, nr + 1);
}

void get_full_page_state(struct page_state *ret)
{
	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
}

unsigned long __read_page_state(unsigned long offset)
{
	unsigned long ret = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long in;

		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
		ret += *((unsigned long *)in);
	}
	return ret;
}

void __mod_page_state(unsigned long offset, unsigned long delta)
{
	unsigned long flags;
	void* ptr;

	local_irq_save(flags);
	ptr = &__get_cpu_var(page_states);
	*(unsigned long*)(ptr + offset) += delta;
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__mod_page_state);

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}

void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = nr_blockdev_pages();
#ifdef CONFIG_HIGHMEM
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = nr_free_pages_pgdat(pgdat);
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	struct page_state ps;
	int cpu, temperature;
	unsigned long active;
	unsigned long inactive;
	unsigned long free;
	struct zone *zone;

	for_each_zone(zone) {
		show_node(zone);
		printk("%s per-cpu:", zone->name);

		if (!zone->present_pages) {
			printk(" empty\n");
			continue;
		} else
			printk("\n");

		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
			struct per_cpu_pageset *pageset;

			if (!cpu_possible(cpu))
				continue;

			pageset = zone_pcp(zone, cpu);

			for (temperature = 0; temperature < 2; temperature++)
				printk("cpu %d %s: low %d, high %d, batch %d used:%d\n",
					cpu,
					temperature ? "cold" : "hot",
					pageset->pcp[temperature].low,
					pageset->pcp[temperature].high,
					pageset->pcp[temperature].batch,
					pageset->pcp[temperature].count);
		}
	}

	get_page_state(&ps);
	get_zone_counts(&active, &inactive, &free);

	printk("\nFree pages: %11ukB (%ukB HighMem)\n",
		K(nr_free_pages()),
		K(nr_free_highpages()));

	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
		active,
		inactive,
		ps.nr_dirty,
		ps.nr_writeback,
		ps.nr_unstable,
		nr_free_pages(),
		ps.nr_slab,
		ps.nr_mapped,
		ps.nr_page_table_pages);

	for_each_zone(zone) {
		int i;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active:%lukB"
			" inactive:%lukB"
			" present:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone->free_pages),
			K(zone->pages_min),
			K(zone->pages_low),
			K(zone->pages_high),
			K(zone->nr_active),
			K(zone->nr_inactive),
			K(zone->present_pages),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_zone(zone) {
		unsigned long nr, flags, order, total = 0;

		show_node(zone);
		printk("%s: ", zone->name);
		if (!zone->present_pages) {
			printk("empty\n");
			continue;
		}

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr = zone->free_area[order].nr_free;
			total += nr << order;
			printk("%lu*%lukB ", nr, K(1UL) << order);
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		printk("= %lukB\n", K(total));
	}

	show_swap_cache_info();
}

/*
 * Builds allocation fallback zone lists.
 */
static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
{
	switch (k) {
		struct zone *zone;
	default:
		BUG();
	case ZONE_HIGHMEM:
		zone = pgdat->node_zones + ZONE_HIGHMEM;
		if (zone->present_pages) {
#ifndef CONFIG_HIGHMEM
			BUG();
#endif
			zonelist->zones[j++] = zone;
		}
	case ZONE_NORMAL:
		zone = pgdat->node_zones + ZONE_NORMAL;
		if (zone->present_pages)
			zonelist->zones[j++] = zone;
	case ZONE_DMA:
		zone = pgdat->node_zones + ZONE_DMA;
		if (zone->present_pages)
			zonelist->zones[j++] = zone;
	}

	return j;
}

#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
static int __initdata node_load[MAX_NUMNODES];
/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int i, n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(i) {
		cpumask_t tmp;

		/* Start from local node */
		n = (node+i) % num_online_nodes();

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the local node if we haven't already */
		if (!node_isset(node, *used_node_mask)) {
			best_node = node;
			break;
		}

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Give preference to headless and unused nodes */
		tmp = node_to_cpumask(n);
		if (!cpus_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}

static void __init build_zonelists(pg_data_t *pgdat)
{
	int i, j, k, node, local_node;
	int prev_node, load;
	struct zonelist *zonelist;
	nodemask_t used_mask;

	/* initialize zonelists */
	for (i = 0; i < GFP_ZONETYPES; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->zones[0] = NULL;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = num_online_nodes();
	prev_node = local_node;
	nodes_clear(used_mask);
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (node_distance(local_node, node) !=
				node_distance(local_node, prev_node))
			node_load[node] += load;
		prev_node = node;
		load--;
		for (i = 0; i < GFP_ZONETYPES; i++) {
			zonelist = pgdat->node_zonelists + i;
			for (j = 0; zonelist->zones[j] != NULL; j++);

			k = ZONE_NORMAL;
			if (i & __GFP_HIGHMEM)
				k = ZONE_HIGHMEM;
			if (i & __GFP_DMA)
				k = ZONE_DMA;

			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
			zonelist->zones[j] = NULL;
		}
	}
}

#else	/* CONFIG_NUMA */

static void __init build_zonelists(pg_data_t *pgdat)
{
	int i, j, k, node, local_node;

	local_node = pgdat->node_id;
	for (i = 0; i < GFP_ZONETYPES; i++) {
		struct zonelist *zonelist;

		zonelist = pgdat->node_zonelists + i;

		j = 0;
		k = ZONE_NORMAL;
		if (i & __GFP_HIGHMEM)
			k = ZONE_HIGHMEM;
		if (i & __GFP_DMA)
			k = ZONE_DMA;

		j = build_zonelists_node(pgdat, zonelist, j, k);
		/*
		 * Now we build the zonelist so that it contains the zones
		 * of all the other nodes.
		 * We don't want to pressure a particular node, so when
		 * building the zones for node N, we make sure that the
		 * zones coming right after the local ones are those from
		 * node N+1 (modulo N)
		 */
		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
		}
		for (node = 0; node < local_node; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
		}

		zonelist->zones[j] = NULL;
	}
}

#endif	/* CONFIG_NUMA */

void __init build_all_zonelists(void)
{
	int i;

	for_each_online_node(i)
		build_zonelists(NODE_DATA(i));
	printk("Built %i zonelists\n", num_online_nodes());
	cpuset_init_current_mems_allowed();
}

/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

static inline unsigned long wait_table_size(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}

/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
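
/*
 * Example (illustrative, not in the original source): a zone of
 * 1,000,000 pages gives 1000000 / PAGES_PER_WAITQUEUE ~= 3906, which
 * rounds up to a 4096-entry wait table, and wait_table_bits(4096) == 12.
 */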

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zones_size[i];
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	if (zholes_size)
		for (i = 0; i < MAX_NR_ZONES; i++)
			realtotalpages -= zholes_size[i];
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}


/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn)
{
	struct page *start = pfn_to_page(start_pfn);
	struct page *page;

	for (page = start; page < (start + size); page++) {
		set_page_zone(page, NODEZONE(nid, zone));
		set_page_count(page, 0);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(start_pfn << PAGE_SHIFT));
#endif
		start_pfn++;
	}
}

void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
				unsigned long size)
{
	int order;
	for (order = 0; order < MAX_ORDER ; order++) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list);
		zone->free_area[order].nr_free = 0;
	}
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif

static int __devinit zone_batchsize(struct zone *zone)
{
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone.  But no more than 1/4 of a meg - there's
	 * no point in going beyond the size of L2 cache.
	 *
	 * OK, so we don't know how big the cache is.  So guess.
	 */
	batch = zone->present_pages / 1024;
	if (batch * PAGE_SIZE > 256 * 1024)
		batch = (256 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = (1 << fls(batch + batch/2)) - 1;
	return batch;
}
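
/*
 * Example (illustrative, not in the original source): for a 512MB zone
 * with 4K pages, present_pages / 1024 == 128, which is clamped to
 * 64 pages (256KB), divided by 4 to 16, and then rounded to
 * (1 << fls(16 + 8)) - 1 == 31.
 */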

inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	struct per_cpu_pages *pcp;

	pcp = &p->pcp[0];		/* hot */
	pcp->count = 0;
	pcp->low = 2 * batch;
	pcp->high = 6 * batch;
	pcp->batch = max(1UL, 1 * batch);
	INIT_LIST_HEAD(&pcp->list);

	pcp = &p->pcp[1];		/* cold */
	pcp->count = 0;
	pcp->low = 0;
	pcp->high = 2 * batch;
	pcp->batch = max(1UL, 1 * batch);
	INIT_LIST_HEAD(&pcp->list);
}
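
/*
 * Illustrative result (made-up batch value, not from the original
 * source): with batch == 31, the hot list refills once it drops to
 * 62 pages and is trimmed back by a batch once it reaches 186, while
 * the cold list is only refilled when empty and never grows past 62.
 */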
1730
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001731#ifdef CONFIG_NUMA
1732/*
Christoph Lameter2caaad42005-06-21 17:15:00 -07001733 * Boot pageset table. One per cpu which is going to be used for all
1734 * zones and all nodes. The parameters will be set in such a way
1735 * that an item put on a list will immediately be handed over to
1736 * the buddy list. This is safe since pageset manipulation is done
1737 * with interrupts disabled.
1738 *
1739 * Some NUMA counter updates may also be caught by the boot pagesets.
1740 * These will be discarded when bootup is complete.
1741 */
1742static struct per_cpu_pageset
1743 boot_pageset[NR_CPUS] __initdata;
1744
1745/*
1746 * Dynamically allocate memory for the
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001747 * per cpu pageset array in struct zone.
1748 */
1749static int __devinit process_zones(int cpu)
1750{
1751 struct zone *zone, *dzone;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001752
1753 for_each_zone(zone) {
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001754
Christoph Lameter2caaad42005-06-21 17:15:00 -07001755 zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset),
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001756 GFP_KERNEL, cpu_to_node(cpu));
Christoph Lameter2caaad42005-06-21 17:15:00 -07001757 if (!zone->pageset[cpu])
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001758 goto bad;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001759
Christoph Lameter2caaad42005-06-21 17:15:00 -07001760 setup_pageset(zone->pageset[cpu], zone_batchsize(zone));
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001761 }
1762
1763 return 0;
1764bad:
1765 for_each_zone(dzone) {
1766 if (dzone == zone)
1767 break;
1768 kfree(dzone->pageset[cpu]);
1769 dzone->pageset[cpu] = NULL;
1770 }
1771 return -ENOMEM;
1772}
1773
1774static inline void free_zone_pagesets(int cpu)
1775{
1776#ifdef CONFIG_NUMA
1777 struct zone *zone;
1778
1779 for_each_zone(zone) {
1780 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
1781
1782 zone_pcp(zone, cpu) = NULL;
1783 kfree(pset);
1784 }
1785#endif
1786}
1787
1788static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
1789 unsigned long action,
1790 void *hcpu)
1791{
1792 int cpu = (long)hcpu;
1793 int ret = NOTIFY_OK;
1794
1795 switch (action) {
1796 case CPU_UP_PREPARE:
1797 if (process_zones(cpu))
1798 ret = NOTIFY_BAD;
1799 break;
1800#ifdef CONFIG_HOTPLUG_CPU
1801 case CPU_DEAD:
1802 free_zone_pagesets(cpu);
1803 break;
1804#endif
1805 default:
1806 break;
1807 }
1808 return ret;
1809}
1810
1811static struct notifier_block pageset_notifier =
1812 { &pageset_cpuup_callback, NULL, 0 };
1813
1814void __init setup_per_cpu_pageset()
1815{
1816 int err;
1817
1818 /* Initialize per_cpu_pageset for cpu 0.
1819 * A cpuup callback will do this for every cpu
1820 * as it comes online
1821 */
1822 err = process_zones(smp_processor_id());
1823 BUG_ON(err);
1824 register_cpu_notifier(&pageset_notifier);
1825}
1826
1827#endif
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829/*
1830 * Set up the zone data structures:
1831 * - mark all pages reserved
1832 * - mark all memory queues empty
1833 * - clear the memory bitmaps
1834 */
1835static void __init free_area_init_core(struct pglist_data *pgdat,
1836 unsigned long *zones_size, unsigned long *zholes_size)
1837{
1838 unsigned long i, j;
1839 const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
1840 int cpu, nid = pgdat->node_id;
1841 unsigned long zone_start_pfn = pgdat->node_start_pfn;
1842
1843 pgdat->nr_zones = 0;
1844 init_waitqueue_head(&pgdat->kswapd_wait);
1845 pgdat->kswapd_max_order = 0;
1846
1847 for (j = 0; j < MAX_NR_ZONES; j++) {
1848 struct zone *zone = pgdat->node_zones + j;
1849 unsigned long size, realsize;
1850 unsigned long batch;
1851
1852 zone_table[NODEZONE(nid, j)] = zone;
1853 realsize = size = zones_size[j];
1854 if (zholes_size)
1855 realsize -= zholes_size[j];
1856
1857 if (j == ZONE_DMA || j == ZONE_NORMAL)
1858 nr_kernel_pages += realsize;
1859 nr_all_pages += realsize;
1860
1861 zone->spanned_pages = size;
1862 zone->present_pages = realsize;
1863 zone->name = zone_names[j];
1864 spin_lock_init(&zone->lock);
1865 spin_lock_init(&zone->lru_lock);
1866 zone->zone_pgdat = pgdat;
1867 zone->free_pages = 0;
1868
1869 zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
1870
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001871 batch = zone_batchsize(zone);
Nick Piggin8e30f272005-05-01 08:58:36 -07001872
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 for (cpu = 0; cpu < NR_CPUS; cpu++) {
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001874#ifdef CONFIG_NUMA
Christoph Lameter2caaad42005-06-21 17:15:00 -07001875 /* Early boot. Slab allocator not functional yet */
1876 zone->pageset[cpu] = &boot_pageset[cpu];
1877 setup_pageset(&boot_pageset[cpu],0);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001878#else
Christoph Lameter2caaad42005-06-21 17:15:00 -07001879 setup_pageset(zone_pcp(zone,cpu), batch);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07001880#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 }
1882 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
1883 zone_names[j], realsize, batch);
1884 INIT_LIST_HEAD(&zone->active_list);
1885 INIT_LIST_HEAD(&zone->inactive_list);
1886 zone->nr_scan_active = 0;
1887 zone->nr_scan_inactive = 0;
1888 zone->nr_active = 0;
1889 zone->nr_inactive = 0;
Martin Hicks1e7e5a92005-06-21 17:14:43 -07001890 atomic_set(&zone->reclaim_in_progress, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 if (!size)
1892 continue;
1893
1894 /*
1895 * The per-page waitqueue mechanism uses hashed waitqueues
1896 * per zone.
1897 */
1898 zone->wait_table_size = wait_table_size(size);
1899 zone->wait_table_bits =
1900 wait_table_bits(zone->wait_table_size);
1901 zone->wait_table = (wait_queue_head_t *)
1902 alloc_bootmem_node(pgdat, zone->wait_table_size
1903 * sizeof(wait_queue_head_t));
1904
1905 for(i = 0; i < zone->wait_table_size; ++i)
1906 init_waitqueue_head(zone->wait_table + i);
1907
1908 pgdat->nr_zones = j+1;
1909
1910 zone->zone_mem_map = pfn_to_page(zone_start_pfn);
1911 zone->zone_start_pfn = zone_start_pfn;
1912
1913 if ((zone_start_pfn) & (zone_required_alignment-1))
1914 printk(KERN_CRIT "BUG: wrong zone alignment, it will crash\n");
1915
1916 memmap_init(size, nid, j, zone_start_pfn);
1917
1918 zone_start_pfn += size;
1919
1920 zone_init_free_lists(pgdat, zone, zone->spanned_pages);
1921 }
1922}
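/*
 * Illustrative sketch (assumption: this mirrors, but is not, the literal
 * page-wait implementation).  The hashed wait table above trades memory for
 * occasional spurious wakeups: conceptually, the page-wait code picks a
 * waitqueue head with something like
 *
 *	wait_queue_head_t *wq =
 *		&zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 *
 * so several unrelated pages may share one head, and a wakeup there merely
 * causes the waiters to re-check their page flag.  The real hash lives in
 * the page-wait code in mm/filemap.c, not in this file.
 */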
1923
1924static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1925{
1926 unsigned long size;
1927
1928 /* Skip empty nodes */
1929 if (!pgdat->node_spanned_pages)
1930 return;
1931
1932 /* ia64 gets its own node_mem_map, before this, without bootmem */
1933 if (!pgdat->node_mem_map) {
1934 size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
1935 pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
1936 }
1937#ifndef CONFIG_DISCONTIGMEM
1938 /*
1939 * With no DISCONTIG, the global mem_map is just set as node 0's
1940 */
1941 if (pgdat == NODE_DATA(0))
1942 mem_map = NODE_DATA(0)->node_mem_map;
1943#endif
1944}
1945
1946void __init free_area_init_node(int nid, struct pglist_data *pgdat,
1947 unsigned long *zones_size, unsigned long node_start_pfn,
1948 unsigned long *zholes_size)
1949{
1950 pgdat->node_id = nid;
1951 pgdat->node_start_pfn = node_start_pfn;
1952 calculate_zone_totalpages(pgdat, zones_size, zholes_size);
1953
1954 alloc_node_mem_map(pgdat);
1955
1956 free_area_init_core(pgdat, zones_size, zholes_size);
1957}
1958
1959#ifndef CONFIG_DISCONTIGMEM
1960static bootmem_data_t contig_bootmem_data;
1961struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
1962
1963EXPORT_SYMBOL(contig_page_data);
1964
1965void __init free_area_init(unsigned long *zones_size)
1966{
1967 free_area_init_node(0, &contig_page_data, zones_size,
1968 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
1969}
1970#endif
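/*
 * Illustrative, hypothetical caller (not part of this file; the variable
 * names below are made up).  On a flat-memory configuration an
 * architecture's paging_init() typically fills a per-zone page-count array
 * and hands it to free_area_init():
 *
 *	unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
 *
 *	zones_size[ZONE_DMA]     = dma_pages;
 *	zones_size[ZONE_NORMAL]  = normal_pages;
 *	zones_size[ZONE_HIGHMEM] = highmem_pages;
 *	free_area_init(zones_size);
 *
 * The call then lands in free_area_init_node() for node 0, with
 * node_start_pfn taken from __pa(PAGE_OFFSET) >> PAGE_SHIFT as above.
 */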
1971
1972#ifdef CONFIG_PROC_FS
1973
1974#include <linux/seq_file.h>
1975
1976static void *frag_start(struct seq_file *m, loff_t *pos)
1977{
1978 pg_data_t *pgdat;
1979 loff_t node = *pos;
1980
1981 for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
1982 --node;
1983
1984 return pgdat;
1985}
1986
1987static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1988{
1989 pg_data_t *pgdat = (pg_data_t *)arg;
1990
1991 (*pos)++;
1992 return pgdat->pgdat_next;
1993}
1994
1995static void frag_stop(struct seq_file *m, void *arg)
1996{
1997}
1998
1999/*
2000 * This walks the free areas for each zone.
2001 */
2002static int frag_show(struct seq_file *m, void *arg)
2003{
2004 pg_data_t *pgdat = (pg_data_t *)arg;
2005 struct zone *zone;
2006 struct zone *node_zones = pgdat->node_zones;
2007 unsigned long flags;
2008 int order;
2009
2010 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2011 if (!zone->present_pages)
2012 continue;
2013
2014 spin_lock_irqsave(&zone->lock, flags);
2015 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
2016 for (order = 0; order < MAX_ORDER; ++order)
2017 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
2018 spin_unlock_irqrestore(&zone->lock, flags);
2019 seq_putc(m, '\n');
2020 }
2021 return 0;
2022}
2023
2024struct seq_operations fragmentation_op = {
2025 .start = frag_start,
2026 .next = frag_next,
2027 .stop = frag_stop,
2028 .show = frag_show,
2029};
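/*
 * Usage note: fragmentation_op is registered from fs/proc and read back as
 * /proc/buddyinfo.  With the frag_show() format above, each populated zone
 * gets one line listing nr_free for orders 0..MAX_ORDER-1, e.g.
 * (illustrative counts only):
 *
 *	Node 0, zone   Normal    216     55    189    101     84     38     37     27      5      3      0
 */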
2030
2031/*
2032 * Output information about zones in @pgdat.
2033 */
2034static int zoneinfo_show(struct seq_file *m, void *arg)
2035{
2036 pg_data_t *pgdat = arg;
2037 struct zone *zone;
2038 struct zone *node_zones = pgdat->node_zones;
2039 unsigned long flags;
2040
2041 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
2042 int i;
2043
2044 if (!zone->present_pages)
2045 continue;
2046
2047 spin_lock_irqsave(&zone->lock, flags);
2048 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
2049 seq_printf(m,
2050 "\n pages free %lu"
2051 "\n min %lu"
2052 "\n low %lu"
2053 "\n high %lu"
2054 "\n active %lu"
2055 "\n inactive %lu"
2056 "\n scanned %lu (a: %lu i: %lu)"
2057 "\n spanned %lu"
2058 "\n present %lu",
2059 zone->free_pages,
2060 zone->pages_min,
2061 zone->pages_low,
2062 zone->pages_high,
2063 zone->nr_active,
2064 zone->nr_inactive,
2065 zone->pages_scanned,
2066 zone->nr_scan_active, zone->nr_scan_inactive,
2067 zone->spanned_pages,
2068 zone->present_pages);
2069 seq_printf(m,
2070 "\n protection: (%lu",
2071 zone->lowmem_reserve[0]);
2072 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
2073 seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
2074 seq_printf(m,
2075 ")"
2076 "\n pagesets");
2077 for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) {
2078 struct per_cpu_pageset *pageset;
2079 int j;
2080
2081 pageset = zone_pcp(zone, i);
2082 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
2083 if (pageset->pcp[j].count)
2084 break;
2085 }
2086 if (j == ARRAY_SIZE(pageset->pcp))
2087 continue;
2088 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
2089 seq_printf(m,
2090 "\n cpu: %i pcp: %i"
2091 "\n count: %i"
2092 "\n low: %i"
2093 "\n high: %i"
2094 "\n batch: %i",
2095 i, j,
2096 pageset->pcp[j].count,
2097 pageset->pcp[j].low,
2098 pageset->pcp[j].high,
2099 pageset->pcp[j].batch);
2100 }
2101#ifdef CONFIG_NUMA
2102 seq_printf(m,
2103 "\n numa_hit: %lu"
2104 "\n numa_miss: %lu"
2105 "\n numa_foreign: %lu"
2106 "\n interleave_hit: %lu"
2107 "\n local_node: %lu"
2108 "\n other_node: %lu",
2109 pageset->numa_hit,
2110 pageset->numa_miss,
2111 pageset->numa_foreign,
2112 pageset->interleave_hit,
2113 pageset->local_node,
2114 pageset->other_node);
2115#endif
2116 }
2117 seq_printf(m,
2118 "\n all_unreclaimable: %u"
2119 "\n prev_priority: %i"
2120 "\n temp_priority: %i"
2121 "\n start_pfn: %lu",
2122 zone->all_unreclaimable,
2123 zone->prev_priority,
2124 zone->temp_priority,
2125 zone->zone_start_pfn);
2126 spin_unlock_irqrestore(&zone->lock, flags);
2127 seq_putc(m, '\n');
2128 }
2129 return 0;
2130}
2131
2132struct seq_operations zoneinfo_op = {
2133 .start = frag_start, /* iterate over all zones. The same as in
2134 * fragmentation. */
2135 .next = frag_next,
2136 .stop = frag_stop,
2137 .show = zoneinfo_show,
2138};
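/*
 * Usage note: zoneinfo_op backs /proc/zoneinfo.  For each populated zone,
 * zoneinfo_show() above emits a block that starts like (illustrative
 * values):
 *
 *	Node 0, zone   Normal
 *	  pages free     2158
 *	        min      1044
 *	        low      1305
 *	        high     1566
 *
 * followed by the LRU counters, the lowmem_reserve "protection" array, one
 * sub-block per non-empty per-cpu pageset and, on NUMA, the
 * numa_hit/numa_miss statistics.
 */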
2139
2140static char *vmstat_text[] = {
2141 "nr_dirty",
2142 "nr_writeback",
2143 "nr_unstable",
2144 "nr_page_table_pages",
2145 "nr_mapped",
2146 "nr_slab",
2147
2148 "pgpgin",
2149 "pgpgout",
2150 "pswpin",
2151 "pswpout",
2152 "pgalloc_high",
2153
2154 "pgalloc_normal",
2155 "pgalloc_dma",
2156 "pgfree",
2157 "pgactivate",
2158 "pgdeactivate",
2159
2160 "pgfault",
2161 "pgmajfault",
2162 "pgrefill_high",
2163 "pgrefill_normal",
2164 "pgrefill_dma",
2165
2166 "pgsteal_high",
2167 "pgsteal_normal",
2168 "pgsteal_dma",
2169 "pgscan_kswapd_high",
2170 "pgscan_kswapd_normal",
2171
2172 "pgscan_kswapd_dma",
2173 "pgscan_direct_high",
2174 "pgscan_direct_normal",
2175 "pgscan_direct_dma",
2176 "pginodesteal",
2177
2178 "slabs_scanned",
2179 "kswapd_steal",
2180 "kswapd_inodesteal",
2181 "pageoutrun",
2182 "allocstall",
2183
2184 "pgrotated",
2185 "nr_bounce",
2186};
2187
2188static void *vmstat_start(struct seq_file *m, loff_t *pos)
2189{
2190 struct page_state *ps;
2191
2192 if (*pos >= ARRAY_SIZE(vmstat_text))
2193 return NULL;
2194
2195 ps = kmalloc(sizeof(*ps), GFP_KERNEL);
2196 m->private = ps;
2197 if (!ps)
2198 return ERR_PTR(-ENOMEM);
2199 get_full_page_state(ps);
2200 ps->pgpgin /= 2; /* sectors -> kbytes */
2201 ps->pgpgout /= 2;
2202 return (unsigned long *)ps + *pos;
2203}
2204
2205static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
2206{
2207 (*pos)++;
2208 if (*pos >= ARRAY_SIZE(vmstat_text))
2209 return NULL;
2210 return (unsigned long *)m->private + *pos;
2211}
2212
2213static int vmstat_show(struct seq_file *m, void *arg)
2214{
2215 unsigned long *l = arg;
2216 unsigned long off = l - (unsigned long *)m->private;
2217
2218 seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
2219 return 0;
2220}
2221
2222static void vmstat_stop(struct seq_file *m, void *arg)
2223{
2224 kfree(m->private);
2225 m->private = NULL;
2226}
2227
2228struct seq_operations vmstat_op = {
2229 .start = vmstat_start,
2230 .next = vmstat_next,
2231 .stop = vmstat_stop,
2232 .show = vmstat_show,
2233};
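/*
 * Usage note: vmstat_op backs /proc/vmstat.  vmstat_show() prints one
 * "name value" pair per line, with names taken from vmstat_text[] in the
 * order of the corresponding struct page_state fields, e.g. (illustrative
 * values):
 *
 *	nr_dirty 172
 *	nr_writeback 0
 *	pgpgin 482344
 *	pgfault 9141587
 */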
2234
2235#endif /* CONFIG_PROC_FS */
2236
2237#ifdef CONFIG_HOTPLUG_CPU
2238static int page_alloc_cpu_notify(struct notifier_block *self,
2239 unsigned long action, void *hcpu)
2240{
2241 int cpu = (unsigned long)hcpu;
2242 long *count;
2243 unsigned long *src, *dest;
2244
2245 if (action == CPU_DEAD) {
2246 int i;
2247
2248 /* Drain local pagecache count. */
2249 count = &per_cpu(nr_pagecache_local, cpu);
2250 atomic_add(*count, &nr_pagecache);
2251 *count = 0;
2252 local_irq_disable();
2253 __drain_pages(cpu);
2254
2255 /* Add dead cpu's page_states to our own. */
2256 dest = (unsigned long *)&__get_cpu_var(page_states);
2257 src = (unsigned long *)&per_cpu(page_states, cpu);
2258
2259 for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
2260 i++) {
2261 dest[i] += src[i];
2262 src[i] = 0;
2263 }
2264
2265 local_irq_enable();
2266 }
2267 return NOTIFY_OK;
2268}
2269#endif /* CONFIG_HOTPLUG_CPU */
2270
2271void __init page_alloc_init(void)
2272{
2273 hotcpu_notifier(page_alloc_cpu_notify, 0);
2274}
2275
2276/*
2277 * setup_per_zone_lowmem_reserve - called whenever
2278 * sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
2279 * has a correct lowmem_reserve[] value, so an adequate number of
2280 * pages are left in the zone after a successful __alloc_pages().
2281 */
2282static void setup_per_zone_lowmem_reserve(void)
2283{
2284 struct pglist_data *pgdat;
2285 int j, idx;
2286
2287 for_each_pgdat(pgdat) {
2288 for (j = 0; j < MAX_NR_ZONES; j++) {
2289 struct zone *zone = pgdat->node_zones + j;
2290 unsigned long present_pages = zone->present_pages;
2291
2292 zone->lowmem_reserve[j] = 0;
2293
2294 for (idx = j-1; idx >= 0; idx--) {
2295 struct zone *lower_zone;
2296
2297 if (sysctl_lowmem_reserve_ratio[idx] < 1)
2298 sysctl_lowmem_reserve_ratio[idx] = 1;
2299
2300 lower_zone = pgdat->node_zones + idx;
2301 lower_zone->lowmem_reserve[j] = present_pages /
2302 sysctl_lowmem_reserve_ratio[idx];
2303 present_pages += lower_zone->present_pages;
2304 }
2305 }
2306 }
2307}
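/*
 * Worked example with made-up zone sizes and example ratios: take a node
 * with DMA = 4096 pages, NORMAL = 200704 pages, HIGHMEM = 57344 pages and
 * reserve ratios of 256 (DMA) and 32 (NORMAL).  The inner loop above yields
 *
 *	NORMAL->lowmem_reserve[HIGHMEM] = 57344 / 32             = 1792
 *	DMA->lowmem_reserve[HIGHMEM]    = (57344 + 200704) / 256 = 1008
 *	DMA->lowmem_reserve[NORMAL]     = 200704 / 256           = 784
 *
 * i.e. a HIGHMEM-capable allocation that falls back to ZONE_DMA must leave
 * 1008 extra pages free there, on top of the normal watermarks.
 */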
2308
2309/*
2310 * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
2311 * that the pages_{min,low,high} values for each zone are set correctly
2312 * with respect to min_free_kbytes.
2313 */
2314static void setup_per_zone_pages_min(void)
2315{
2316 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
2317 unsigned long lowmem_pages = 0;
2318 struct zone *zone;
2319 unsigned long flags;
2320
2321 /* Calculate total number of !ZONE_HIGHMEM pages */
2322 for_each_zone(zone) {
2323 if (!is_highmem(zone))
2324 lowmem_pages += zone->present_pages;
2325 }
2326
2327 for_each_zone(zone) {
2328 spin_lock_irqsave(&zone->lru_lock, flags);
2329 if (is_highmem(zone)) {
2330 /*
2331 * Often, highmem doesn't need to reserve any pages.
2332 * But the pages_min/low/high values are also used for
2333 * batching up page reclaim activity so we need a
2334 * decent value here.
2335 */
2336 int min_pages;
2337
2338 min_pages = zone->present_pages / 1024;
2339 if (min_pages < SWAP_CLUSTER_MAX)
2340 min_pages = SWAP_CLUSTER_MAX;
2341 if (min_pages > 128)
2342 min_pages = 128;
2343 zone->pages_min = min_pages;
2344 } else {
2345 /* If it's a lowmem zone, reserve a number of pages
2346 * proportionate to the zone's size.
2347 */
2348 zone->pages_min = (pages_min * zone->present_pages) /
2349 lowmem_pages;
2350 }
2351
2352 /*
2353 * The low and high watermarks scale from pages_min:
2354 * pages_min : pages_low : pages_high == 4 : 5 : 6.
2355 */
2356 zone->pages_low = (zone->pages_min * 5) / 4;
2357 zone->pages_high = (zone->pages_min * 6) / 4;
2358 spin_unlock_irqrestore(&zone->lru_lock, flags);
2359 }
2360}
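/*
 * Worked example with made-up sizes: with min_free_kbytes = 1024 and 4K
 * pages, pages_min = 1024 >> (PAGE_SHIFT - 10) = 256 pages in total.  A
 * lowmem zone holding half of all lowmem then gets
 *
 *	zone->pages_min  = 256 / 2     = 128
 *	zone->pages_low  = 128 * 5 / 4 = 160
 *	zone->pages_high = 128 * 6 / 4 = 192
 *
 * while a highmem zone instead gets present_pages/1024 clamped to
 * [SWAP_CLUSTER_MAX, 128].
 */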
2361
2362/*
2363 * Initialise min_free_kbytes.
2364 *
2365 * For small machines we want it small (128k min). For large machines
2366 * we want it large (64MB max). But it is not linear, because network
2367 * bandwidth does not increase linearly with machine size. We use
2368 *
2369 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
2370 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
2371 *
2372 * which yields
2373 *
2374 * 16MB: 512k
2375 * 32MB: 724k
2376 * 64MB: 1024k
2377 * 128MB: 1448k
2378 * 256MB: 2048k
2379 * 512MB: 2896k
2380 * 1024MB: 4096k
2381 * 2048MB: 5792k
2382 * 4096MB: 8192k
2383 * 8192MB: 11584k
2384 * 16384MB: 16384k
2385 */
2386static int __init init_per_zone_pages_min(void)
2387{
2388 unsigned long lowmem_kbytes;
2389
2390 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
2391
2392 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
2393 if (min_free_kbytes < 128)
2394 min_free_kbytes = 128;
2395 if (min_free_kbytes > 65536)
2396 min_free_kbytes = 65536;
2397 setup_per_zone_pages_min();
2398 setup_per_zone_lowmem_reserve();
2399 return 0;
2400}
2401module_init(init_per_zone_pages_min)
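/*
 * Worked check of the formula above: for 128MB of lowmem, lowmem_kbytes =
 * 131072, so min_free_kbytes = int_sqrt(131072 * 16) = int_sqrt(2097152) =
 * 1448 (1448^2 = 2096704 <= 2097152 < 1449^2), matching the 128MB row of
 * the table.  The 128k floor and 65536k ceiling are then applied.
 */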
2402
2403/*
2404 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
2405 * that we can call two helper functions whenever min_free_kbytes
2406 * that we can call setup_per_zone_pages_min() whenever
2407 * min_free_kbytes changes.
2408int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
2409 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2410{
2411 proc_dointvec(table, write, file, buffer, length, ppos);
2412 setup_per_zone_pages_min();
2413 return 0;
2414}
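/*
 * Usage note: this handler runs when the tunable is written, e.g.
 *
 *	echo 2048 > /proc/sys/vm/min_free_kbytes
 *
 * (or sysctl vm.min_free_kbytes=2048), after which the per-zone
 * pages_min/low/high watermarks are recomputed immediately.
 */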
2415
2416/*
2417 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
2418 * proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
2419 * whenever sysctl_lowmem_reserve_ratio changes.
2420 *
2421 * The reserve ratio has no relation to the pages_min watermarks.
2422 * The lowmem reserve ratio is only meaningful relative to the
2423 * boot-time zone sizes.
2424 */
2425int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
2426 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2427{
2428 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2429 setup_per_zone_lowmem_reserve();
2430 return 0;
2431}
2432
2433__initdata int hashdist = HASHDIST_DEFAULT;
2434
2435#ifdef CONFIG_NUMA
2436static int __init set_hashdist(char *str)
2437{
2438 if (!str)
2439 return 0;
2440 hashdist = simple_strtoul(str, &str, 0);
2441 return 1;
2442}
2443__setup("hashdist=", set_hashdist);
2444#endif
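/*
 * Usage note: booting with "hashdist=1" on the kernel command line makes
 * the large boot-time hashes below use __vmalloc(), so their pages can end
 * up spread across NUMA nodes; "hashdist=0" forces contiguous, node-local
 * allocations.  HASHDIST_DEFAULT supplies the compile-time default.
 */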
2445
2446/*
2447 * allocate a large system hash table (from bootmem, vmalloc or the page allocator)
2448 * - it is assumed that the hash table must contain an exact power-of-2
2449 * quantity of entries
2450 * - limit is the number of hash buckets, not the total allocation size
2451 */
2452void *__init alloc_large_system_hash(const char *tablename,
2453 unsigned long bucketsize,
2454 unsigned long numentries,
2455 int scale,
2456 int flags,
2457 unsigned int *_hash_shift,
2458 unsigned int *_hash_mask,
2459 unsigned long limit)
2460{
2461 unsigned long long max = limit;
2462 unsigned long log2qty, size;
2463 void *table = NULL;
2464
2465 /* allow the kernel cmdline to have a say */
2466 if (!numentries) {
2467 /* round applicable memory size up to nearest megabyte */
2468 numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
2469 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
2470 numentries >>= 20 - PAGE_SHIFT;
2471 numentries <<= 20 - PAGE_SHIFT;
2472
2473 /* limit to 1 bucket per 2^scale bytes of low memory */
2474 if (scale > PAGE_SHIFT)
2475 numentries >>= (scale - PAGE_SHIFT);
2476 else
2477 numentries <<= (PAGE_SHIFT - scale);
2478 }
2479 /* rounded up to nearest power of 2 in size */
2480 numentries = 1UL << (long_log2(numentries) + 1);
2481
2482 /* limit allocation size to 1/16 total memory by default */
2483 if (max == 0) {
2484 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2485 do_div(max, bucketsize);
2486 }
2487
2488 if (numentries > max)
2489 numentries = max;
2490
2491 log2qty = long_log2(numentries);
2492
2493 do {
2494 size = bucketsize << log2qty;
2495 if (flags & HASH_EARLY)
2496 table = alloc_bootmem(size);
2497 else if (hashdist)
2498 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
2499 else {
2500 unsigned long order;
2501 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
2502 ;
2503 table = (void*) __get_free_pages(GFP_ATOMIC, order);
2504 }
2505 } while (!table && size > PAGE_SIZE && --log2qty);
2506
2507 if (!table)
2508 panic("Failed to allocate %s hash table\n", tablename);
2509
2510 printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
2511 tablename,
2512 (1U << log2qty),
2513 long_log2(size) - PAGE_SHIFT,
2514 size);
2515
2516 if (_hash_shift)
2517 *_hash_shift = log2qty;
2518 if (_hash_mask)
2519 *_hash_mask = (1 << log2qty) - 1;
2520
2521 return table;
2522}
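/*
 * Worked example with made-up numbers: if the memory-scaled estimate comes
 * to roughly 3000 entries, long_log2(3000) = 11, so numentries is rounded
 * up to 1 << 12 = 4096.  With an 8-byte bucket the table takes
 *
 *	size = 8 << 12 = 32768 bytes  (order 3 on 4K pages)
 *
 * and the caller gets *_hash_shift = 12 and *_hash_mask = 0xfff, i.e. a
 * hash value is reduced with "hash & 0xfff" to index the 4096 buckets.
 */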