#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <asm/atomic.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif

struct free_area {
	struct list_head	free_list;
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
#ifdef CONFIG_NUMA
	unsigned long numa_hit;		/* allocated in intended node */
	unsigned long numa_miss;	/* allocated in non-intended node */
	unsigned long numa_foreign;	/* was intended here, hit elsewhere */
	unsigned long interleave_hit;	/* interleaver preferred this zone */
	unsigned long local_node;	/* allocation from local node */
	unsigned long other_node;	/* allocation from other node */
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif
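
/*
 * Illustrative sketch (not part of this header) of how the page
 * allocator's fast path consumes these lists; see buffered_rmqueue()
 * in mm/page_alloc.c for the real code, which also refills an empty
 * list via rmqueue_bulk():
 *
 *	struct per_cpu_pages *pcp = &zone_pcp(zone, cpu)->pcp[cold];
 *
 *	if (pcp->count) {
 *		page = list_entry(pcp->list.next, struct page, lru);
 *		list_del(&page->lru);
 *		pcp->count--;
 *	}
 */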

#define ZONE_DMA		0
#define ZONE_DMA32		1
#define ZONE_NORMAL		2
#define ZONE_HIGHMEM		3

#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */


/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  GFP_ZONEMASK defines which bits within
 * the gfp_mask should be considered as zone modifiers.  Each valid
 * combination of the zone modifier bits has a corresponding list
 * of zones (in node_zonelists).  Thus for two zone modifiers there
 * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
 * be at most 8 (2 ** 3) zonelists.  GFP_ZONETYPES is the number of
 * zonelists actually allocated: not every bit combination is a valid
 * request (__GFP_DMA32, the highest modifier, is only used on its own),
 * so the combinations above index 4 never occur.
 *
 * NOTE! Make sure this matches the zones in <linux/gfp.h>
 */
#define GFP_ZONEMASK	0x07
#define GFP_ZONETYPES	5
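
/*
 * Illustrative example (assuming the bit values in <linux/gfp.h>:
 * __GFP_DMA == 0x01, __GFP_HIGHMEM == 0x02, __GFP_DMA32 == 0x04): the
 * allocator picks a zonelist as node_zonelists[gfp_mask & GFP_ZONEMASK],
 * so, roughly:
 *
 *	GFP_KERNEL (no zone bits)    -> zonelist 0: NORMAL -> DMA32 -> DMA
 *	GFP_KERNEL | __GFP_DMA       -> zonelist 1: DMA
 *	GFP_HIGHUSER (__GFP_HIGHMEM) -> zonelist 2: HIGHMEM -> NORMAL -> ...
 *
 * Each list is NULL-terminated (see struct zonelist below).
 */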

/*
 * On machines where it is needed (e.g. PCs) we divide physical memory
 * into multiple physical zones.  On a PC we have 4 zones:
 *
 * ZONE_DMA	  < 16 MB	ISA DMA capable memory
 * ZONE_DMA32	     0 MB	Empty here; x86-64 uses it for memory < 4 GB
 * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
 * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
 */

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		free_pages;
	unsigned long		pages_min, pages_low, pages_high;
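
	/*
	 * A rough sketch of the watermark semantics (the authoritative
	 * logic lives in zone_watermark_ok() and mm/vmscan.c): kswapd
	 * is woken once free_pages drops below pages_low, allocators
	 * fall back to direct reclaim below pages_min, and kswapd stops
	 * reclaiming once the zone is back above pages_high.
	 */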
	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or whether it will be released eventually, so to
	 * avoid totally wasting several GB of ram we must reserve some of
	 * the lower zone memory (otherwise we risk running OOM on the lower
	 * zones despite there being tons of freeable ram in the higher
	 * zones).  This array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];
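
	/*
	 * Worked example (numbers purely illustrative): if, in the DMA
	 * zone, lowmem_reserve[ZONE_NORMAL] is 256, an allocation that
	 * could have been satisfied from ZONE_NORMAL may only fall back
	 * to ZONE_DMA while
	 *
	 *	free_pages > watermark + 256
	 *
	 * holds there; this is the check zone_watermark_ok() applies,
	 * and it keeps the scarce DMA zone from being eaten by
	 * allocations that do not strictly need it.
	 */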

#ifdef CONFIG_NUMA
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
	struct per_cpu_pageset	pageset[NR_CPUS];
#endif
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		nr_active;
	unsigned long		nr_inactive;
	unsigned long		pages_scanned;	   /* since last reclaim */
	int			all_unreclaimable; /* All pages pinned */

	/*
	 * Does the allocator try to reclaim pages from the zone as soon
	 * as it fails a watermark_ok() in __alloc_pages?
	 */
	int			reclaim_pages;
	/* A count of how many reclaimers are scanning this zone */
	atomic_t		reclaim_in_progress;

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim
	 * is under - it drives the swappiness decision: whether to unmap
	 * mapped pages.
	 *
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to free_pages == pages_high.
	 *
	 * Access to both these fields is quite racy even on uniprocessor.
	 * But it is expected to average out OK.
	 */
	int temp_priority;
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_size	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible.  The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time.  So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd.  The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields; free_area_init_core() in
	 * mm/page_alloc.c performs their initialization.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_size;
	unsigned long		wait_table_bits;
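
	/*
	 * As a sketch of how the hash is consulted (this mirrors
	 * page_waitqueue() in mm/filemap.c, modulo details):
	 *
	 *	wait_queue_head_t *page_waitqueue(struct page *page)
	 *	{
	 *		const struct zone *zone = page_zone(page);
	 *
	 *		return &zone->wait_table[hash_ptr(page,
	 *						  zone->wait_table_bits)];
	 *	}
	 */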

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	struct page		*zone_mem_map;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */
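
	/*
	 * A reader would use the usual seqlock retry pattern; a minimal
	 * sketch, assuming the zone_span_seqbegin()/zone_span_seqretry()
	 * wrappers from <linux/memory_hotplug.h>:
	 *
	 *	unsigned long seq, start_pfn;
	 *
	 *	do {
	 *		seq = zone_span_seqbegin(zone);
	 *		start_pfn = zone->zone_start_pfn;
	 *	} while (zone_span_seqretry(zone, seq));
	 */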

	/*
	 * rarely used fields:
	 */
	char			*name;
} ____cacheline_internodealigned_in_smp;


/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/*
 * One allocation request operates on a zonelist.  A zonelist is a list
 * of zones; the first one is the 'goal' of the allocation, and the
 * other zones are fallback zones, in decreasing priority.
 *
 * Right now a zonelist takes up less than a cacheline.  We never
 * modify it apart from boot-up, and only a few indices are used,
 * so despite the zonelist table being relatively big, the cache
 * footprint of this construct is very small.
 */
struct zonelist {
	struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1];	/* NULL delimited */
};
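
/*
 * Illustrative use, a simplified version of the walk done in
 * __alloc_pages() (mm/page_alloc.c):
 *
 *	struct zone **z;
 *	int classzone_idx = zone_idx(zonelist->zones[0]);
 *
 *	for (z = zonelist->zones; *z != NULL; z++)
 *		if (zone_watermark_ok(*z, order, (*z)->pages_low,
 *				      classzone_idx, 0))
 *			... allocate from *z ...
 *
 * The classzone index comes from the first ("goal") zone so that the
 * lowmem_reserve[] protection is applied consistently across all of
 * the fallback zones.
 */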


/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to describe a higher-level grouping of memory
 * than the individual zones below it.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[GFP_ZONETYPES];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;
#endif
	struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	struct pglist_data *pgdat_next;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#include <linux/memory_hotplug.h>

extern struct pglist_data *pgdat_list;

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
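
/*
 * A sketch of the zone_watermark_ok() semantics (the real logic lives
 * in mm/page_alloc.c): it returns true when, after a hypothetical
 * allocation of 2^order pages, the zone would still have about
 *
 *	mark + z->lowmem_reserve[classzone_idx]
 *
 * free pages, with additional checks that the higher free_area[]
 * orders are not drained.  alloc_flags can lower the bar for
 * high-priority callers (see the ALLOC_* flags in mm/page_alloc.c).
 */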

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_DMA32 zone,
 * 2 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/**
 * for_each_pgdat - helper macro to iterate over all nodes
 * @pgdat: pointer to a pg_data_t variable
 *
 * Meant to help with common loops of the form
 *	pgdat = pgdat_list;
 *	while (pgdat) {
 *		...
 *		pgdat = pgdat->pgdat_next;
 *	}
 */
#define for_each_pgdat(pgdat) \
	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)

/*
 * next_zone - helper magic for for_each_zone()
 * Thanks to William Lee Irwin III for this piece of ingenuity.
 */
static inline struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else if (pgdat->pgdat_next) {
		pgdat = pgdat->pgdat_next;
		zone = pgdat->node_zones;
	} else
		zone = NULL;

	return zone;
}

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.  This basically means for_each_zone() is an
 * easier to read version of this piece of code:
 *
 *	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
 *		for (i = 0; i < MAX_NR_ZONES; ++i) {
 *			struct zone *z = pgdat->node_zones + i;
 *			...
 *		}
 */
#define for_each_zone(zone) \
	for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

static inline int is_highmem_idx(int idx)
{
	return (idx == ZONE_HIGHMEM);
}

static inline int is_normal_idx(int idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
}

static inline int is_dma(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
}

/* These handlers are used to set up the per-zone min/reserve page values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);

#include <linux/topology.h>
/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (2 bits, see ZONES_SHIFT above) and this leaves
 * 9-2=7 bits for nodes.
 */
#define FLAGS_RESERVED		9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED		32

#else

#error BITS_PER_LONG not defined

#endif

#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
#define early_pfn_to_nid(pfn)	(0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
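
/*
 * Worked example with assumed i386/PAE values (SECTION_SIZE_BITS = 26,
 * MAX_PHYSMEM_BITS = 36, PAGE_SHIFT = 12; see <asm/sparsemem.h> for
 * the real per-arch numbers):
 *
 *	SECTIONS_SHIFT    = 36 - 26 = 10  -> 1024 sections
 *	PFN_SECTION_SHIFT = 26 - 12 = 14  -> 16384 pages (64 MB) per section
 *	pfn_to_section_nr(0x12345)        = 0x12345 >> 14 = 4
 */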

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 2 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int section_has_mem_map(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#define pfn_to_page(pfn)						\
({									\
	unsigned long __pfn = (pfn);					\
	__section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn;	\
})
#define page_to_pfn(page)						\
({									\
	page - __section_mem_map_addr(__nr_to_section(			\
		page_to_section(page)));				\
})
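
/*
 * Why pfn_to_page() can add the full pfn rather than an offset within
 * the section: the encoded section_mem_map has the section's first pfn
 * pre-subtracted when it is stored (see sparse_encode_mem_map() in
 * mm/sparse.c).  Sketching the idea:
 *
 *	encoded = mem_map - section_nr_to_pfn(section_nr);
 *	page    = encoded + pfn;	(what pfn_to_page() computes)
 *		= mem_map + (pfn - first pfn of the section)
 */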

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */