/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

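/*
 * Sum each vm event counter across the cpus in @cpumask into @ret,
 * which must have room for NR_VM_EVENT_ITEMS entries.
 */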
static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_cpu_mask(cpu, *cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
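
/*
 * Illustrative caller (a sketch, not from this file): snapshot the
 * event counters, remembering that the totals are only approximate.
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk(KERN_DEBUG "pgfault %lu pgmajfault %lu\n",
 *	       events[PGFAULT], events[PGMAJFAULT]);
 */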

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This adds the foreign cpu's event counts to the current processor's
 * counters but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
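
/*
 * Sketch of the intended call site (an assumption for illustration; the
 * actual caller lives elsewhere, e.g. a CPU hotplug notifier in the
 * page allocator):
 *
 *	case CPU_DEAD:
 *		...
 *		vm_events_fold_cpu(cpu);
 *		...
 */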
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer; more processors could lead to more contention.
	 * fls() is used as a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem)+1
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 36		4		3	2-4 GB		6
	 * 48		4		3	8-16 GB		8
	 * 40		8		4	1-2 GB		5
	 * 32		8		4	0.9-1 GB	4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 84		64		7	2-4 GB		6
	 * 98		64		7	4-8 GB		7
	 * 125		512		10	4-8 GB		7
	 * 125		1024		11	8-16 GB		8
	 * 125		1024		11	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
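
/*
 * Worked example of the formula above (illustration only): on a 4 cpu
 * machine with a fully populated 2GB zone, mem = 2GB / 128MB = 16, so
 *
 *	threshold = 2 * fls(4) * (1 + fls(16)) = 2 * 3 * 6 = 36
 *
 * i.e. each cpu may accumulate up to 36 pages of drift per counter
 * before folding its differential back into the zone counters.
 */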

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_zone(zone) {

		if (!zone->present_pages)
			continue;

		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
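
/*
 * Illustrative use (a sketch, not a caller in this file): with
 * interrupts already disabled, account two pages against a zone:
 *
 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, 2);
 */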

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place, which may allow the compiler to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
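
/*
 * Example of the overstep above: with stat_threshold == 32, the 33rd
 * increment trips the check; 33 + 16 is folded into the zone counter
 * and the differential is set to -16, so the totals stay exact
 * (49 - 16 == 33) and the next fold is roughly 49 increments away.
 */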

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These updates can cause remote node cache
 * line bouncing and should only be done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_zone(zone) {
		struct per_cpu_pageset *p;

		if (!populated_zone(zone))
			continue;

		p = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor.
		 *
		 * Check if there are pages remaining in this pageset;
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}
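
/*
 * Note on global_diff above: the global counters are folded in a single
 * pass at the end rather than inside the zone loop, so that the shared
 * vm_stat cachelines are touched as rarely as possible.
 */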

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
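
/*
 * Example (illustration only): a task on node 1 allocates with a
 * zonelist that prefers node 0. If the page comes from node 0 that is
 * a NUMA_HIT on node 0's zone; if it comes from node 1 instead, node 1
 * records NUMA_MISS and node 0's preferred zone records NUMA_FOREIGN.
 * NUMA_LOCAL/NUMA_OTHER record whether the zone used sits on the node
 * the allocating cpu runs on.
 */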
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
};

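/* Position the seq_file iterator at the *pos'th online node. */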
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
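
/*
 * For reference, this is the /proc/buddyinfo format (values made up
 * for illustration); one free count per order up to MAX_ORDER:
 *
 *	Node 0, zone   Normal    312    101     33     12      5      1 ...
 */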

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		mtype = get_pageblock_migratetype(page);

		count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}

const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",
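
/*
 * Example expansion (assuming CONFIG_ZONE_DMA and CONFIG_HIGHMEM are set
 * and CONFIG_ZONE_DMA32 is not): TEXTS_FOR_ZONES("pgalloc") becomes
 *
 *	"pgalloc_dma", "pgalloc_normal", "pgalloc_high", "pgalloc_movable",
 */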

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive",
	"nr_active",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#endif
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu (a: %lu i: %lu)"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   zone->pages_min,
		   zone->pages_low,
		   zone->pages_high,
		   zone->pages_scanned,
		   zone->nr_scan_active, zone->nr_scan_inactive,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = zone_pcp(zone, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu",
		   zone_is_all_unreclaimable(zone),
		   zone->prev_priority,
		   zone->zone_start_pfn);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		sysctl_stat_interval);
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
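	/*
	 * The "+ cpu" below staggers the first expiry so that the
	 * per-cpu timers do not all fire at the same moment.
	 */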
	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };

static int __init setup_vmstat(void)
{
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
	return 0;
}
module_init(setup_vmstat)
#endif