#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
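
/*
 * Usage sketch (illustrative, not part of this header): event counters
 * are bumped directly from the relevant code paths, e.g. the fault
 * handler counts faults and the page allocator batches frees:
 *
 *	count_vm_event(PGFAULT);
 *	__count_vm_events(PGFREE, 1 << order);
 *
 * The __ variants assume the caller already has preemption disabled (or
 * is otherwise pinned to a cpu); the plain variants are safe anywhere.
 */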

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do {} while (0)
#endif /* CONFIG_NUMA_BALANCING */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
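
/*
 * Illustrative expansion: the per-zone event items are laid out
 * contiguously in enum vm_event_item (PGALLOC_DMA, ..., PGALLOC_NORMAL,
 * PGALLOC_MOVABLE), so for example
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order)
 *
 * offsets PGALLOC_NORMAL by zone_idx(zone) - ZONE_NORMAL to reach the
 * item matching the zone the allocation came from.
 */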

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
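
/*
 * The clamp to zero under CONFIG_SMP exists because per-cpu deltas that
 * have not yet been folded back can transiently drive a counter
 * negative; readers are expected to tolerate this slight inaccuracy.
 * A typical (hypothetical) reader:
 *
 *	if (global_page_state(NR_FILE_DIRTY) > dirty_limit)
 *		throttle_writer();
 */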

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
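
/*
 * Illustrative use: slow paths that must not act on a stale value, such
 * as the safe watermark check in the page allocator, pay the extra cost
 * of the per-cpu walk:
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */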
149
Wu Fengguangadea02a2009-09-21 17:01:42 -0700150extern unsigned long global_reclaimable_pages(void);
151extern unsigned long zone_reclaimable_pages(struct zone *zone);
Rik van Riel4f98a2f2008-10-18 20:26:32 -0700152
Christoph Lameter2244b952006-06-30 01:55:33 -0700153#ifdef CONFIG_NUMA
154/*
155 * Determine the per node value of a stat item. This function
156 * is called frequently in a NUMA machine, so try to be as
157 * frugal as possible.
158 */
159static inline unsigned long node_page_state(int node,
160 enum zone_stat_item item)
161{
162 struct zone *zones = NODE_DATA(node)->node_zones;
163
164 return
Christoph Lameter4b51d662007-02-10 01:43:10 -0800165#ifdef CONFIG_ZONE_DMA
166 zone_page_state(&zones[ZONE_DMA], item) +
167#endif
Christoph Lameterfb0e7942006-09-25 23:31:13 -0700168#ifdef CONFIG_ZONE_DMA32
Christoph Lameter2244b952006-06-30 01:55:33 -0700169 zone_page_state(&zones[ZONE_DMA32], item) +
170#endif
Christoph Lameter2244b952006-06-30 01:55:33 -0700171#ifdef CONFIG_HIGHMEM
172 zone_page_state(&zones[ZONE_HIGHMEM], item) +
173#endif
Mel Gorman2a1e2742007-07-17 04:03:12 -0700174 zone_page_state(&zones[ZONE_NORMAL], item) +
175 zone_page_state(&zones[ZONE_MOVABLE], item);
Christoph Lameter2244b952006-06-30 01:55:33 -0700176}
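
/*
 * Illustrative call: sum a stat item over every zone of one node, e.g.
 *
 *	node_page_state(numa_node_id(), NR_FILE_PAGES);
 */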

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif	/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
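
/*
 * Illustrative call site: the buddy allocator adjusts both counters in
 * one step when moving a block onto or off the free lists, e.g.
 *
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * so NR_FREE_CMA_PAGES tracks the CMA share of NR_FREE_PAGES.
 */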

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */