#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		THP_FAULT_ALLOC,
		THP_FAULT_FALLBACK,
		THP_COLLAPSE_ALLOC,
		THP_COLLAPSE_ALLOC_FAILED,
		THP_SPLIT,
#endif
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the
 * generated code is simply the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
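
/*
 * Illustrative usage (added example, not part of the original header):
 * callers use the plain wrappers from any context, and the __ variants
 * only when preemption is already disabled, e.g. in a sketch of a
 * fault or free path:
 *
 *	count_vm_event(PGFAULT);		// safe in any context
 *	__count_vm_events(PGFREE, 1 << order);	// caller is non-preemptible
 */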

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
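
/*
 * Illustrative expansion (added for clarity, not in the original):
 * FOR_ALL_ZONES() lays the per-zone event items out in zone order, so
 * the item for a given zone can be computed from the zone index, e.g.
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1)
 *
 * becomes __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL +
 * zone_idx(zone), 1), which selects PGALLOC_DMA, PGALLOC_NORMAL,
 * PGALLOC_HIGH, ... depending on the zone (and the configured zones).
 */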

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	/*
	 * Per-cpu deltas that have not been folded back yet can make
	 * the sum transiently negative; report 0 in that case.
	 */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is still not
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
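
/*
 * Illustrative note (an assumption, not from the original header): the
 * snapshot variant suits decisions that must not be fooled by stale
 * per-cpu deltas, e.g. a "safe" free-page watermark check:
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * whereas fast paths stick with the cheaper zone_page_state().
 */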

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
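
/*
 * Illustrative usage (added example): summing a stat over all zones of
 * one node, e.g. the free pages on the local node:
 *
 *	unsigned long free = node_page_state(numa_node_id(), NR_FREE_PAGES);
 */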

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
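
/*
 * Illustrative usage of the SMP API (added example, values are
 * assumptions): the __ variants expect the caller to already be
 * protected, e.g. with interrupts disabled in a buddy free path:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *
 * while mod_zone_page_state() may be called from contexts where that
 * protection is not guaranteed.
 */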
#else		/* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */