#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

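/*
 * Illustrative expansion (not in the original source): with CONFIG_ZONE_DMA,
 * CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM all enabled, FOR_ALL_ZONES(PGALLOC)
 * expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * i.e. one event item per zone, declared in zone order.
 */
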
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		NR_VM_EVENT_ITEMS
};

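/*
 * Note (added for clarity): these seq_operations back the procfs
 * interfaces of the vmstat code -- /proc/buddyinfo, /proc/pagetypeinfo,
 * /proc/zoneinfo and /proc/vmstat respectively -- and
 * sysctl_stat_interval (the vm.stat_interval sysctl) controls how often
 * the per cpu vm statistics are folded into the global counters.
 */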
extern const struct seq_operations fragmentation_op;
extern const struct seq_operations pagetypeinfo_op;
extern const struct seq_operations zoneinfo_op;
extern const struct seq_operations vmstat_op;
extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
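
/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *	count_vm_event(PGFAULT);	safe in any context; pins the cpu
 *					via get_cpu_var()/put_cpu()
 *	__count_vm_event(PGROTATED);	caller must already prevent
 *					migration to another cpu
 *
 * The __ variants trade that safety for a shorter instruction sequence.
 */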

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
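
/*
 * Example (added for clarity): the FOR_ALL_ZONES() items are declared in
 * zone order, so item##_NORMAL - ZONE_NORMAL + zone_idx(zone) selects the
 * event item corresponding to the zone.  E.g. for a ZONE_HIGHMEM zone,
 * __count_zone_vm_events(PGALLOC, zone, 1) increments PGALLOC_HIGH by one.
 */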

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

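/*
 * Note (added for clarity): with per cpu differentials in flight the
 * summed counters can transiently appear negative on SMP, so the two
 * readers below clamp the value to zero rather than report nonsense.
 */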
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
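
/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *	unsigned long free = node_page_state(numa_node_id(), NR_FREE_PAGES);
 *
 * sums NR_FREE_PAGES over every configured zone of the local node.
 */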

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z)	do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

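/*
 * Folds this cpu's per cpu vm statistics differentials back into the
 * zone and global counters (comment added for clarity).
 */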
void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */