/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}

void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_online_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}
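
/*
 * Illustrative use (not from this file): readers that need the
 * system-wide totals, e.g. the /proc/meminfo read handler, call this
 * along the lines of:
 *
 *	unsigned long active, inactive, free;
 *
 *	get_zone_counts(&active, &inactive, &free);
 */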

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu = 0;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		/* Prefetch the next cpu's counters while summing this one's. */
		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(vm_event_states, cpu));

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
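
/*
 * Illustrative use (not from this file): snapshot every event counter,
 * e.g. to compute deltas over a sampling interval:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk("pgfault %lu\n", events[PGFAULT]);
 */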

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

#define STAT_THRESHOLD 32

/*
 * Determine pointer to currently valid differential byte given a zone and
 * the item number.
 *
 * Preemption must be off.
 */
static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
{
	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
}
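
/*
 * Design note (added commentary, not from the original source): each
 * differential is an s8, so it can hold at most +/-127.  Because a
 * counter is folded into the global array as soon as it crosses
 * STAT_THRESHOLD, the stored differential never exceeds the threshold,
 * and a reader of the global counter sees an error bounded by roughly
 * STAT_THRESHOLD * num_online_cpus() pages per item.
 */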

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	s8 *p;
	long x;

	p = diff_pointer(zone, item);
	x = delta + *p;

	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}

	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
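
/*
 * Worked example (illustrative only): with STAT_THRESHOLD == 32, a cpu
 * whose differential for an item currently holds 30 and that then does
 *
 *	__mod_zone_page_state(zone, item, 5);
 *
 * computes x = 35, which exceeds the threshold, so all 35 pages are
 * folded into the zone and global counters and the per-cpu differential
 * is reset to 0.  Smaller updates never touch the shared cachelines.
 */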

/*
 * For an unknown interrupt state.
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 *
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	s8 *p = diff_pointer(zone, item);

	(*p)++;

	if (unlikely(*p > STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
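
/*
 * Illustrative caller (not from this file): the page-cache insertion
 * path already holds mapping->tree_lock with interrupts disabled, so it
 * can use the cheap variant directly, roughly:
 *
 *	write_lock_irq(&mapping->tree_lock);
 *	...
 *	mapping->nrpages++;
 *	__inc_zone_page_state(page, NR_FILE_PAGES);
 *	write_unlock_irq(&mapping->tree_lock);
 */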

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	s8 *p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;
	s8 *p;

	zone = page_zone(page);
	local_irq_save(flags);
	p = diff_pointer(zone, item);

	(*p)--;

	if (unlikely(*p < -STAT_THRESHOLD)) {
		zone_page_state_add(*p, zone, item);
		*p = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *pcp;

		pcp = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (pcp->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(pcp->vm_stat_diff[i],
					zone, i);
				pcp->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			}
	}
}

static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}
/*
 * Consolidate all counters.
 *
 * The result is less inaccurate than reading the counters directly,
 * but still approximate: other cpus may keep updating their
 * differentials while this function runs.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);
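
/*
 * Note (assumption about the on_each_cpu() signature of this era): the
 * trailing arguments are retry == 0 and wait == 1, so the caller blocks
 * until every online cpu has folded its differentials into vm_stat.
 */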

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->zone_pgdat == NODE_DATA(numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
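
/*
 * Worked example (illustrative only): a task running on node 0 asks for
 * memory, so zonelist->zones[0] lives on node 0.  If the page actually
 * comes from a zone on node 1, the node 1 zone gets NUMA_MISS and
 * NUMA_OTHER, while the preferred node 0 zone gets NUMA_FOREIGN.  Had
 * the allocation been satisfied on node 0, that zone would have gotten
 * NUMA_HIT and NUMA_LOCAL instead.
 */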
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
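
/*
 * Sample output as exposed through /proc/buddyinfo (values
 * illustrative), one column of free-block counts per order:
 *
 *	Node 0, zone      DMA      3      2      1      0 ...
 *	Node 0, zone   Normal    145     62     33     10 ...
 */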

struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static char *vmstat_text[] = {
	/* Zoned VM counters */
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_slab",
	"nr_page_table_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_bounce",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"interleave_hit",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	"pgalloc_dma",
	"pgalloc_dma32",
	"pgalloc_normal",
	"pgalloc_high",

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	"pgrefill_dma",
	"pgrefill_dma32",
	"pgrefill_normal",
	"pgrefill_high",

	"pgsteal_dma",
	"pgsteal_dma32",
	"pgsteal_normal",
	"pgsteal_high",

	"pgscan_kswapd_dma",
	"pgscan_kswapd_dma32",
	"pgscan_kswapd_normal",
	"pgscan_kswapd_high",

	"pgscan_direct_dma",
	"pgscan_direct_dma32",
	"pgscan_direct_normal",
	"pgscan_direct_high",

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#endif
};

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        active   %lu"
			   "\n        inactive %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone->free_pages,
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->nr_active,
			   zone->nr_inactive,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  temp_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->temp_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}
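
/*
 * Sample /proc/zoneinfo excerpt produced by the above (values
 * illustrative):
 *
 *	Node 0, zone   Normal
 *	  pages free     2207
 *	        min      16
 *	        ...
 *	    nr_anon_pages 10241
 *	  pagesets
 *	    cpu: 0 pcp: 0
 *	              count: 42
 *	              high:  186
 *	              batch: 31
 */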

struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
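
/*
 * Sample /proc/vmstat output produced by vmstat_show() (values
 * illustrative), one "name value" pair per line in vmstat_text order:
 *
 *	nr_anon_pages 33298
 *	nr_mapped 8824
 *	...
 *	pgpgin 126774
 */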

#endif /* CONFIG_PROC_FS */