/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu = 0;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(vm_event_states, cpu));

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
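
/*
 * Illustrative use (a minimal sketch, not taken from an actual caller):
 * a reader snapshots all event counters into a local array and indexes
 * it with the vm_event_item enum, e.g.
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk("page faults so far: %lu\n", events[PGFAULT]);
 *
 * Since the per-cpu counters keep ticking, the snapshot is only an
 * approximation, as noted above.
 */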

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates
	 * for longer; more processors could lead to more contention.
	 * fls() is used as a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
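
/*
 * Worked example (illustrative): on a machine with 16 online CPUs and a
 * zone of roughly 900MB, mem = 900MB / 128MB = 7, so
 *
 *	threshold = 2 * fls(16) * (1 + fls(7)) = 2 * 5 * 4 = 40,
 *
 * matching the 900M row in the table above.
 */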

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_zone(zone) {

		if (!zone->present_pages)
			continue;

		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
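
/*
 * Illustrative use (hypothetical caller; nr_freed is a made-up count):
 * with interrupts already off, e.g. under a zone lock, a whole batch of
 * pages can be accounted in one call:
 *
 *	__mod_zone_page_state(zone, NR_FILE_PAGES, -nr_freed);
 *
 * The delta accumulates in the per-cpu differential and only touches the
 * global vm_stat counter once the threshold is crossed.
 */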

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
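
/*
 * Worked example of the overstep logic (illustrative): with a threshold
 * of 32, the increment that takes the local differential to 33 folds
 * 33 + 16 = 49 into the global counter and leaves the differential at
 * -16.  Local plus global state is unchanged (49 - 16 = 33), but a
 * counter that keeps incrementing is now 49 steps away from the next
 * fold instead of 33, reducing how often the global cacheline is hit.
 */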

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < - pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be done only when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *p;

		if (!populated_zone(zone))
			continue;

		p = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(p->vm_stat_diff[i],
					zone, i);
				p->vm_stat_diff[i] = 0;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
				local_irq_restore(flags);
			}
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor.
		 *
		 * Check if there are pages remaining in this pageset;
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp[0].count)
			drain_zone_pages(zone, p->pcp + 0);

		if (p->pcp[1].count)
			drain_zone_pages(zone, p->pcp + 1);
#endif
	}
}
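
/*
 * Within this file, refresh_cpu_vm_stats() is driven from the
 * vmstat_update() delayed work further down, so each online cpu folds
 * its differentials back into the global counters roughly once per
 * sysctl_stat_interval.
 */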

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
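
/*
 * Example (illustrative): an allocation whose preferred zone is on node 0
 * but which is satisfied from a node 1 zone bumps NUMA_MISS on the node 1
 * zone and NUMA_FOREIGN on the preferred node 0 zone.  Independently,
 * NUMA_LOCAL/NUMA_OTHER record whether the allocating cpu sits on the
 * node of the zone that satisfied the allocation.
 */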
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
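
/*
 * frag_show() is the show routine behind /proc/buddyinfo (wired up via
 * fragmentation_op below).  A line of output looks like this, with
 * illustrative values:
 *
 *	Node 0, zone   Normal    216     55    189 ...
 *
 * i.e. one free-block count per order, printed by frag_show_print.
 */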

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		mtype = get_pageblock_migratetype(page);

		count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not read the file constantly.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}
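
/*
 * pagetypeinfo_show() backs /proc/pagetypeinfo (via pagetypeinfo_op
 * below).  The output starts with the pageblock geometry, then the
 * per-order free counts per migratetype, then the per-zone pageblock
 * counts produced by the two helpers above.
 */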

const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive",
	"nr_active",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#endif
};
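
/*
 * The ordering of vmstat_text must match enum zone_stat_item followed by
 * the vm_event_item counters: vmstat_start() below fills a single array
 * with the zone counters first and the event counters after them, and
 * vmstat_show() indexes this table with the combined offset.
 */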

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu (a: %lu i: %lu)"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   zone->pages_min,
		   zone->pages_low,
		   zone->pages_high,
		   zone->pages_scanned,
		   zone->nr_scan_active, zone->nr_scan_inactive,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;
		int j;

		pageset = zone_pcp(zone, i);
		for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
			seq_printf(m,
				   "\n    cpu: %i pcp: %i"
				   "\n              count: %i"
				   "\n              high:  %i"
				   "\n              batch: %i",
				   i, j,
				   pageset->pcp[j].count,
				   pageset->pcp[j].high,
				   pageset->pcp[j].batch);
		}
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu",
		   zone_is_all_unreclaimable(zone),
		   zone->prev_priority,
		   zone->zone_start_pfn);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
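
/*
 * vmstat_show() emits one "name value" pair per line, so /proc/vmstat
 * reads as e.g. (illustrative values):
 *
 *	nr_free_pages 83714
 *	pgfault 1430951
 */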

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		sysctl_stat_interval);
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}
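
/*
 * The initial delay of HZ + cpu staggers the per-cpu timers so the
 * vmstat_update() work does not fire on every cpu in the same tick;
 * each instance then reschedules itself every sysctl_stat_interval
 * (one second by default, since the interval is initialized to HZ).
 */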

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };

static int __init setup_vmstat(void)
{
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
	return 0;
}
module_init(setup_vmstat)
#endif
863#endif