#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

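/*
 * Usage sketch (illustrative, not part of the original header): callers bump
 * event counters with count_vm_event()/count_vm_events().  The __ variants
 * use raw_cpu ops, so they are cheaper but may occasionally lose an update
 * when preempted, which is acceptable for these statistics.  The helper
 * below is hypothetical; PGFAULT and PGFREE come from
 * <linux/vm_event_item.h>.
 */
static inline void vmstat_example_count_events(long nr_freed)
{
	count_vm_event(PGFAULT);		/* preempt/irq safe */
	__count_vm_events(PGFREE, nr_freed);	/* cheaper, tolerates races */
}
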
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

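/*
 * Illustrative sketch (not part of the original header):
 * __count_zone_vm_events() relies on the per-zone event items (e.g.
 * PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_MOVABLE, ...) being declared
 * contiguously and in zone order in <linux/vm_event_item.h>, so that
 * item##_NORMAL - ZONE_NORMAL + zone_idx(zone) selects the right one.
 * The helper name below is hypothetical.
 */
static inline void vmstat_example_count_alloc(struct zone *zone,
					      unsigned int order)
{
	__count_zone_vm_events(PGALLOC, zone, 1 << order);
}
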
/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

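/*
 * Usage sketch (illustrative, not part of the original header): readers pull
 * fuzzy global or per-zone counts directly and locklessly, e.g. to estimate
 * free memory.  NR_FREE_PAGES comes from <linux/mmzone.h>; the helper below
 * is hypothetical.
 */
static inline unsigned long vmstat_example_free_pages(struct zone *zone)
{
	unsigned long zone_free   = zone_page_state(zone, NR_FREE_PAGES);
	unsigned long global_free = global_page_state(NR_FREE_PAGES);

	/* A zone cannot meaningfully hold more free pages than the system. */
	return zone_free < global_free ? zone_free : global_free;
}
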
/*
 * More accurate version that also folds in the currently pending per-cpu
 * deltas, which requires looping over all online cpus. There is no
 * synchronization, so the result is still not exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

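/*
 * Illustrative sketch (not part of the original header): the snapshot variant
 * walks every online cpu, so a hypothetical caller would reserve it for slow
 * paths where the cheap zone_page_state() read is too stale to act on.
 */
static inline bool vmstat_example_zone_really_empty(struct zone *zone)
{
	if (zone_page_state(zone, NR_FREE_PAGES))	/* fast, fuzzy read */
		return false;

	/* Fold in pending per-cpu deltas before declaring the zone empty. */
	return zone_page_state_snapshot(zone, NR_FREE_PAGES) == 0;
}
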
#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function is called
 * frequently on NUMA machines, so try to be as frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

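/*
 * Usage sketch (illustrative, not part of the original header): per-node
 * totals sum every zone on that node, and fall back to the global counter on
 * !CONFIG_NUMA builds.  The helper below is hypothetical and assumes @node is
 * a valid node id.
 */
static inline unsigned long vmstat_example_node_file_pages(int node)
{
	return node_page_state(node, NR_FILE_PAGES);
}
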
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

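/*
 * Usage sketch (illustrative, not part of the original header): writers pick
 * the variant matching their context.  mod/inc/dec_zone_page_state() are
 * usable from any context; the __ variants assume the caller already runs
 * with interrupts disabled.  The helper and its use of NR_FILE_MAPPED are
 * hypothetical examples.
 */
static inline void vmstat_example_account_mapped(struct page *page, bool mapped)
{
	if (mapped)
		inc_zone_page_state(page, NR_FILE_MAPPED);
	else
		dec_zone_page_state(page, NR_FILE_MAPPED);
}
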
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */