#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

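/*
 * Worked example (illustrative): with the default MAX_ORDER of 11 the
 * buddy allocator manages blocks of 2^0 .. 2^10 pages, so
 * MAX_ORDER_NR_PAGES is 1024 and, with 4KB pages, the largest
 * contiguous allocation is 4MB.
 */
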
/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is, it marks the boundary between allocation
 * orders which should coalesce naturally under reasonable reclaim
 * pressure and those which will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

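/*
 * Illustrative sketch (the helper name is made up for this example):
 * the nested loops above visit every (order, migratetype) pair,
 * MAX_ORDER * MIGRATE_TYPES iterations in total, e.g. 11 * 5 = 55
 * with the defaults above.
 */
static inline int count_migratetype_order_pairs(void)
{
	int order, type, pairs = 0;

	for_each_migratetype_order(order, type)
		pairs++;
	return pairs;
}
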
extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

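/*
 * Illustrative sketch (hypothetical helper): each free_area keeps one
 * list per migratetype, so probing for a free block of a given mobility
 * is just a list_empty() test on the right list.
 */
static inline int free_area_is_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
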
struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_INACTIVE,
	NR_ACTIVE,
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	/* Second 128 byte cacheline */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non-intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_VM_ZONE_STAT_ITEMS };

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif

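/*
 * Illustrative usage sketch of the accessor above (struct zone itself
 * is only defined further down in this header):
 *
 *	struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
 *
 *	if (pset->pcp.count >= pset->pcp.high)
 *		... time to drain pcp.list back to the buddy lists ...
 */
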
enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	MAX_NR_ZONES
};

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */

/*
 * Count the active zones.  Note that defined(X) outside of #if and
 * friends is not necessarily well defined, so ensure that __ZONE_COUNT
 * cannot be used later (hence the #undef below).  Use __ZONE_COUNT to
 * work out how many shift bits we need.
 */
#define __ZONE_COUNT (			\
	  defined(CONFIG_ZONE_DMA)	\
	+ defined(CONFIG_ZONE_DMA32)	\
	+ 1				\
	+ defined(CONFIG_HIGHMEM)	\
	+ 1				\
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
#elif __ZONE_COUNT <= 2
#define ZONES_SHIFT 1
#elif __ZONE_COUNT <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured, adjust calculation
#endif
#undef __ZONE_COUNT
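
/*
 * Worked example (illustrative): on an x86_64-style config with
 * CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 set and no CONFIG_HIGHMEM,
 * __ZONE_COUNT is 1 + 1 + 1 + 0 + 1 == 4 (DMA, DMA32, NORMAL, MOVABLE),
 * so ZONES_SHIFT is 2.
 */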

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or released eventually, so to avoid totally wasting
	 * several GB of ram we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones despite there
	 * being tons of freeable ram on the higher zones).  This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
	struct per_cpu_pageset	pageset[NR_CPUS];
#endif
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * Access to this field is quite racy even on uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_hash_nr_entries == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	* wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;

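/*
 * Illustrative sketch of how the wait_table fields are consumed; this
 * mirrors, roughly, page_waitqueue() in mm/filemap.c (page_zone() and
 * hash_ptr() come from linux/mm.h and linux/hash.h):
 *
 *	static wait_queue_head_t *page_waitqueue(struct page *page)
 *	{
 *		const struct zone *zone = page_zone(page);
 *
 *		return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 *	}
 */
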
typedef enum {
	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_all_unreclaimable(const struct zone *zone)
{
	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}

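/*
 * Illustrative sketch (the helper name is made up for this example):
 * zone_test_and_set_flag() gives an atomic per-zone trylock, which is
 * how callers such as the OOM killer can serialize on a zone.
 */
static inline int zone_trylock_oom(struct zone *zone)
{
	/* returns 1 if we took the lock, 0 if someone else holds it */
	return !zone_test_and_set_flag(zone, ZONE_OOM_LOCKED);
}
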
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

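/*
 * Illustrative sketch (hypothetical helper): the per-pass scan quota
 * for a given priority.  At DEF_PRIORITY a queue of 4096 pages yields
 * one page per pass; as priority falls toward 0 the whole queue is
 * scanned.
 */
static inline unsigned long scan_quota(unsigned long queue_length, int priority)
{
	return queue_length >> priority;
}
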
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0 .. MAX_NR_ZONES - 1]			: Zonelists with fallback
 * [MAX_NR_ZONES .. MAX_ZONELISTS - 1]		: No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS (2 * MAX_NR_ZONES)


/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_full_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * re-examining zones for free memory when they came up low on
 * memory just moments ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 *  1) The full, fixed length version, shown below, and
 *  2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS MAX_NR_ZONES
struct zonelist_cache;
#endif

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 */

struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     // optional ...
#endif
};

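/*
 * Illustrative sketch (hypothetical helper): zones[] is NULL delimited,
 * so walking a zonelist in fallback order is a simple pointer scan.
 */
static inline unsigned long zonelist_nr_zones(struct zonelist *zl)
{
	struct zone **z;
	unsigned long n = 0;

	for (z = zl->zones; *z != NULL; z++)
		n++;
	return n;
}
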
#ifdef CONFIG_NUMA
/*
 * Only custom zonelists like MPOL_BIND need to be filtered as part of
 * policies. As described in the comment for struct zonelist_cache, these
 * zonelists will not have a zlcache so zlcache_ptr will not be set. Use
 * that to determine if the zonelist needs to be filtered or not.
 */
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return !zonelist->zlcache_ptr;
}
#else
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return 0;
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zones themselves denote.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;
#endif
	struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#include <linux/memory_hotplug.h>

void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_idx = zone - zone->zone_pgdat->node_zones;
	return zone_idx == ZONE_HIGHMEM ||
		(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

/* These functions are used to set up the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#include <linux/topology.h>
/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

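/*
 * Illustrative usage sketch: with NODE_DATA() available from either
 * branch above, per-node page counts can be read as, e.g.,
 *
 *	unsigned long present = node_present_pages(numa_node_id());
 */
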
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

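/*
 * Illustrative sketch (hypothetical helper): summing a field across all
 * zones with the iterator above.
 */
static inline unsigned long total_present_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_zone(zone)
		total += zone->present_pages;
	return total;
}
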
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
 */
#define FLAGS_RESERVED		9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED		32

#else

#error BITS_PER_LONG not defined

#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
#define early_pfn_to_nid(nid)  (0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT    		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

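/*
 * Worked example (illustrative; the constants are per-arch, these are
 * just plausible values): with SECTION_SIZE_BITS == 27 and
 * PAGE_SHIFT == 12, each section covers 128MB, PFN_SECTION_SHIFT == 15,
 * and PAGES_PER_SECTION == 32768.
 */
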
#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode the node id of
	 * the section's location here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

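/*
 * Illustrative sketch (hypothetical helper): sparse.c stores each
 * section's mem_map biased by the section's first pfn, so sparsemem's
 * pfn_to_page() (see include/asm-generic/memory_model.h) reduces to a
 * simple add.  Callers are expected to have checked pfn_valid() first.
 */
static inline struct page *__sparse_pfn_to_page(unsigned long pfn)
{
	return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
}
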
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then
 * we need to check pfn validity within that block.  pfn_valid_within()
 * should be used in this case; we optimise this away when we have no
 * holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */