#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
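
/*
 * For example, with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM
 * all enabled, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * i.e. one enum member per zone, laid out in zone index order. That ordering
 * is what lets __count_zone_vm_events() below turn a zone index into an
 * offset from the xx##_NORMAL member.
 */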

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
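
/*
 * Usage sketch (callers live elsewhere in mm/, shown only for illustration):
 * a page fault handler bumps the event counter with
 *
 *	count_vm_event(PGFAULT);
 *
 * The double-underscore variants use the non-atomic __this_cpu ops and rely
 * on the caller to keep the context stable (e.g. preemption or interrupts
 * already disabled); the plain variants use this_cpu ops and are safe to
 * call from any context.
 */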

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif
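
/*
 * With CPU hotplug enabled, vm_events_fold_cpu() (implemented in
 * mm/vmstat.c) moves the event counts of an offlined cpu into another
 * cpu's counters, so the totals reported by all_vm_events() survive a
 * CPU going away.
 */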

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
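
/*
 * How the offset arithmetic works: FOR_ALL_ZONES lays out the per zone
 * members of an event item in the same order as the zone indexes, so
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1)
 *
 * expands to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone), 1)
 *
 * which lands on PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_HIGH etc. according
 * to the zone being accounted.
 */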

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
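
/*
 * The clamp to zero above exists because, on SMP, per cpu deltas are only
 * folded into the atomic counters periodically (see refresh_cpu_vm_stats()),
 * so a reader may observe a transiently negative sum. Reporting that as
 * zero keeps the value sane at the cost of a small, short-lived inaccuracy.
 */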

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
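
/*
 * Illustrative use only: a caller adding a page-cache page to a zone could
 * account for it with
 *
 *	add_zone_page_state(zone, NR_FILE_PAGES, 1);
 *
 * which is just mod_zone_page_state() with a positive delta, while
 * sub_zone_page_state() negates the delta for decrements.
 */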

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */