/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>

#include "internal.h"

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
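
/*
 * Worked example (illustrative numbers, not taken from the code above):
 * with a low-to-min watermark distance of 256 pages and 16 online CPUs,
 * the threshold becomes max(1, 256 / 16) = 16. Even if every CPU then
 * drifts by its full 16-page differential in the same direction, the
 * total error (16 * 16 = 256 pages) cannot make a zone that still
 * reports the low watermark as met actually breach the min watermark.
 */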

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
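
/*
 * Worked example (illustrative, assuming 4KB pages): a 1GB zone has
 * present_pages = 262144, so mem = 262144 >> (27 - 12) = 8 (128MB units).
 * With 2 online CPUs this gives
 *	threshold = 2 * fls(2) * (1 + fls(8)) = 2 * 2 * (1 + 4) = 20.
 */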

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_possible_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}
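
/*
 * Usage sketch (illustrative, not taken from this file): reclaim code is
 * expected to switch a node to the safer thresholds while it works and
 * back again when it is done, e.g.
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 *
 * Only zones with a percpu_drift_mark are touched, since only those can
 * misreport a breached min watermark.
 */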

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
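
/*
 * Usage sketch (illustrative): callers must have interrupts disabled,
 * e.g.
 *
 *	local_irq_save(flags);
 *	__mod_zone_page_state(zone, NR_FILE_MAPPED, -nr);
 *	local_irq_restore(flags);
 *
 * Only the per-cpu differential is touched until it exceeds
 * stat_threshold in either direction; only then is the whole batch
 * folded into the zone and global counters by zone_page_state_add().
 */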

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}
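
/*
 * Worked example (illustrative): with stat_threshold t = 32, the
 * increment that takes the differential to v = 33 sees v > t, adds
 * v + t/2 = 49 to the zone counter and restarts the differential at
 * -16. Overstepping by half the threshold means a subsequent run of
 * decrements does not force another fold until the differential drops
 * below -32, which halves the fold rate for alternating workloads.
 */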

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *	0	No overstepping
 *	1	Overstepping half of threshold
 *	-1	Overstepping minus half of threshold
 */
static inline void mod_state(struct zone *zone,
	enum zone_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;	/* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
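
/*
 * Worked example (illustrative): with t = 32 and a pending differential
 * o = 32, inc_zone_state() calls mod_state(zone, item, 1, 1). Then
 * n = 33 > t, so os = 1 * (32 >> 1) = 16, z = n + os = 49 is folded into
 * the zone counter and the differential restarts at n = -16, all without
 * disabling interrupts; the this_cpu_cmpxchg() loop simply retries if
 * another context changed *p in the meantime.
 */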

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	mod_state(zone, item, 1, 1);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}
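
/*
 * Note on the fold above (commentary): per-zone deltas are applied to
 * each zone's own counters as they are found, but the shared vm_stat[]
 * array is updated only once per item at the end via global_diff[].
 * With many populated zones this replaces one atomic_long_add() on the
 * global cachelines per zone and item with a single add per item,
 * limiting cross-node cache line bouncing.
 */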

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 *
 * When __GFP_OTHER_NODE is set assume the node of the preferred
 * zone is the local node. This is useful for daemons who allocate
 * memory on behalf of other processes.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == ((flags & __GFP_OTHER_NODE) ?
			preferred_zone->node : numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
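
/*
 * Example (illustrative): a task on node 0 whose allocation is satisfied
 * from a node 1 zone bumps NUMA_MISS on the node 1 zone, NUMA_FOREIGN on
 * the preferred node 0 zone and NUMA_OTHER on the node 1 zone (the page
 * is remote to the requesting cpu). A fully local allocation increments
 * NUMA_HIT and NUMA_LOCAL on the node 0 zone instead.
 */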
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace.
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}
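
/*
 * Worked example (illustrative): free lists holding 10 order-0 pages,
 * 4 order-1 blocks and 1 order-3 block yield free_pages = 10 + 4*2 + 8
 * = 26 and free_blocks_total = 15. For suitable_order = 2 only the
 * order-3 block qualifies, contributing 1 << (3 - 2) = 2 order-2 sized
 * blocks, so free_blocks_suitable = 2.
 */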

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used.
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}
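
/*
 * Worked example (illustrative): an order-2 request (4 pages) against a
 * zone with free_pages = 18 spread over free_blocks_total = 14 blocks,
 * none of order >= 2, gives
 *	1000 - (1000 + 18 * 1000 / 4) / 14 = 1000 - 5500 / 14 = 608,
 * i.e. close to 1: the failure is mostly due to fragmentation. If a
 * suitable block existed the request could not fail and -1000 is
 * returned instead.
 */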

/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
#ifdef CONFIG_CMA
	"CMA",
#endif
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_writeback_temp",
	"nr_isolated_anon",
	"nr_isolated_file",
	"nr_shmem",
	"nr_dirtied",
	"nr_written",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif
	"nr_anon_transparent_hugepages",
	"nr_free_cma",
	"nr_swapcache",
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal_kswapd")
	TEXTS_FOR_ZONES("pgsteal_direct")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"kswapd_skip_congestion_wait",
	"pageoutrun",
	"allocstall",

	"pgrotated",

#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_isolated",
	"compact_stall",
	"compact_fail",
	"compact_success",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_split",
#endif

#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->pages_scanned,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   !zone_reclaimable(zone),
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i, stat_items_size;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
	stat_items_size += sizeof(struct vm_event_state);
#endif

	v = kmalloc(stat_items_size, GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}
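
/*
 * Layout of the snapshot buffer built above (commentary): the kmalloc'd
 * array holds, in order,
 *
 *	NR_VM_ZONE_STAT_ITEMS		global zone counters
 *	NR_VM_WRITEBACK_STAT_ITEMS	dirty thresholds
 *	NR_VM_EVENT_ITEMS		event counters (if configured)
 *
 * which matches the ordering of vmstat_text[], so vmstat_show() can index
 * both arrays with the same offset.
 */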

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static struct workqueue_struct *vmstat_wq;
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	queue_delayed_work(vmstat_wq, &__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
	queue_delayed_work_on(cpu, vmstat_wq, work, __round_jiffies_relative(HZ, cpu));
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		refresh_zone_stat_thresholds();
		start_cpu_timer(cpu);
		node_set_state(cpu_to_node(cpu), N_CPU);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	register_cpu_notifier(&vmstat_notifier);

	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>

static struct dentry *extfrag_debug_root;

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
}
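
/*
 * Worked example (illustrative): with 26 free pages of which 2 order-2
 * sized blocks (8 pages) could satisfy an order-2 request, the unusable
 * index is (26 - (2 << 2)) * 1000 / 26 = 18000 / 26 = 692, i.e. roughly
 * 69% of the free memory is unusable for order-2 allocations.
 */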

static void unusable_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
	.open		= unusable_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= extfrag_show,
};

static int extfrag_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &extfrag_op);
}

static const struct file_operations extfrag_file_ops = {
	.open		= extfrag_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init extfrag_debug_init(void)
{
	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)
		return -ENOMEM;

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))
		return -ENOMEM;

	if (!debugfs_create_file("extfrag_index", 0444,
			extfrag_debug_root, NULL, &extfrag_file_ops))
		return -ENOMEM;

	return 0;
}

module_init(extfrag_debug_init);
#endif