/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;

	int swappiness;

	int all_unreclaimable;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

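/*
 * New pages are added at the head of an LRU list, so the tail (->prev)
 * holds the coldest pages; lru_to_page() therefore pulls from the tail.
 */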
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
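/* vm_swappiness is runtime-tunable via /proc/sys/vm/swappiness. */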
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

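		/*
		 * Worked example (illustrative numbers): with scanned = 4096
		 * LRU pages, seeks = 2, max_pass = 10000 objects and
		 * lru_pages = 1000000, delta = (4*4096/2) * 10000 / 1000001,
		 * i.e. about 81 objects are queued for scanning, so the
		 * cache is aged in proportion to the LRU scan rate.
		 */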
		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

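/*
 * A freeable pagecache page carries exactly two references: one from
 * the caller and one from the page cache itself, plus one more for
 * buffer heads when PagePrivate is set.  Any extra reference means
 * someone else may still be using (or re-dirtying) the page.
 */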
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
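/*
 * Note: "a single ref" means a detachable page is seen here with
 * page_count() == 2, the caller's reference plus the page cache's;
 * the check below relies on that.
 */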
int remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	write_lock_irq(&mapping->tree_lock);
	/*
	 * The non-racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page))
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

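		/*
		 * may_enter_fs: writing a file page back may re-enter the
		 * filesystem, so __GFP_FS is required; swap I/O goes straight
		 * to the block layer and only needs __GFP_IO.
		 */
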
		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!mapping || !remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct list_head *target;
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		list_del(&page->lru);
		target = src;
		if (likely(get_page_unless_zero(page))) {
			/*
			 * Be careful not to clear PageLRU until after we're
			 * sure the page is not being freed elsewhere -- the
			 * page release code relies on it.
			 */
			ClearPageLRU(page);
			target = dst;
			nr_taken++;
		} /* else it is being freed elsewhere */

		list_add(&page->lru, target);
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
				struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
		struct page *page;
		unsigned long nr_taken;
		unsigned long nr_scan;
		unsigned long nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		__mod_zone_page_state(zone, NR_INACTIVE, -nr_taken);
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		nr_scanned += nr_scan;
		nr_freed = shrink_page_list(&page_list, sc);
		nr_reclaimed += nr_freed;
		local_irq_disable();
		if (current_is_kswapd()) {
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
			__count_vm_events(KSWAPD_STEAL, nr_freed);
		} else
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
		__count_zone_vm_events(PGSTEAL, zone, nr_freed);

		if (nr_taken == 0)
			goto done;

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	} while (nr_scanned < max_scan);
	spin_unlock(&zone->lru_lock);
done:
	local_irq_enable();
	pagevec_release(&pvec);
	return nr_reclaimed;
}

/*
 * We are about to scan this zone at a certain priority level.  If that priority
 * level is smaller (ie: more urgent) than the previous priority, then note
 * that priority level within the zone.  This is done so that when the next
 * process comes in to scan this zone, it will immediately start out at this
 * priority level rather than having to build up its own scanning priority.
 * Here, this priority affects only the reclaim-mapped threshold.
 */
static inline void note_zone_scanning_priority(struct zone *zone, int priority)
{
	if (priority < zone->prev_priority)
		zone->prev_priority = priority;
}

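/*
 * Heuristic: the zone has been scanned three times over without enough
 * reclaim progress (pages_scanned is reset when pages are freed), so
 * treat it as nearly OOM and stop protecting mapped pages.
 */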
static inline int zone_is_near_oom(struct zone *zone)
{
	return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
				+ zone_page_state(zone, NR_INACTIVE))*3;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
				struct scan_control *sc, int priority)
{
	unsigned long pgmoved;
	int pgdeactivate = 0;
	unsigned long pgscanned;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;

	if (sc->may_swap) {
		long mapped_ratio;
		long distress;
		long swap_tendency;

		if (zone_is_near_oom(zone))
			goto force_reclaim_mapped;

		/*
		 * `distress' is a measure of how much trouble we're having
		 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
		 */
		distress = 100 >> min(zone->prev_priority, priority);

		/*
		 * The point of this algorithm is to decide when to start
		 * reclaiming mapped memory instead of just pagecache.  Work
		 * out how much memory is mapped.
		 */
		mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
				global_page_state(NR_ANON_PAGES)) * 100) /
					vm_total_pages;

		/*
		 * Now decide how much we really want to unmap some pages.  The
		 * mapped ratio is downgraded - just because there's a lot of
		 * mapped memory doesn't necessarily mean that page reclaim
		 * isn't succeeding.
		 *
		 * The distress ratio is important - we don't want to start
		 * going oom.
		 *
		 * A 100% value of vm_swappiness overrides this algorithm
		 * altogether.
		 */
		swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
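
		/*
		 * Worked example: with half of memory mapped
		 * (mapped_ratio = 50), default swappiness (60) and no
		 * reclaim trouble yet (distress = 0), swap_tendency is
		 * 25 + 0 + 60 = 85 and mapped pages are left alone.  By
		 * scanning priority 2, distress is 100 >> 2 = 25, the sum
		 * reaches 110 and mapped pages get reclaimed too.
		 */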

		/*
		 * Now use this metric to decide whether to start moving mapped
		 * memory onto the inactive list.
		 */
		if (swap_tendency >= 100)
force_reclaim_mapped:
			reclaim_mapped = 1;
	}

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(!PageActive(page));
		ClearPageActive(page);

		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);

	__count_zone_vm_events(PGREFILL, zone, pgscanned);
	__count_vm_events(PGDEACTIVATE, pgdeactivate);
	spin_unlock_irq(&zone->lru_lock);

	pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static unsigned long shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;
	unsigned long nr_to_scan;
	unsigned long nr_reclaimed = 0;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active +=
		(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive +=
		(zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

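	/*
	 * Each list contributes (list size >> priority) + 1 pages per call:
	 * at DEF_PRIORITY (12), a zone with a million inactive pages feeds
	 * about 245 pages into each scan.  The work is then done in
	 * swap_cluster_max sized batches, alternating between the lists.
	 */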
	while (nr_active || nr_inactive) {
		if (nr_active) {
			nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= nr_to_scan;
			shrink_active_list(nr_to_scan, zone, sc, priority);
		}

		if (nr_inactive) {
			nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= nr_to_scan;
			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
								sc);
		}
	}

	throttle_vm_writeout(sc->gfp_mask);

	atomic_dec(&zone->reclaim_in_progress);
	return nr_reclaimed;
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static unsigned long shrink_zones(int priority, struct zone **zones,
					struct scan_control *sc)
{
	unsigned long nr_reclaimed = 0;
	int i;

	sc->all_unreclaimable = 1;
	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!populated_zone(zone))
			continue;

		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
			continue;

		note_zone_scanning_priority(zone, priority);

		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		sc->all_unreclaimable = 0;

		nr_reclaimed += shrink_zone(priority, zone, sc);
	}
	return nr_reclaimed;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
	int priority;
	int ret = 0;
	unsigned long total_scanned = 0;
	unsigned long nr_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long lru_pages = 0;
	int i;
	struct scan_control sc = {
		.gfp_mask = gfp_mask,
		.may_writepage = !laptop_mode,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.may_swap = 1,
		.swappiness = vm_swappiness,
	};

	count_vm_event(ALLOCSTALL);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
			continue;

		lru_pages += zone_page_state(zone, NR_ACTIVE)
				+ zone_page_state(zone, NR_INACTIVE);
	}

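	/*
	 * Apply exponentially increasing pressure: the first pass at
	 * DEF_PRIORITY (12) scans only ~1/4096th of each LRU list, and each
	 * subsequent pass doubles that until priority 0 scans everything.
	 */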
	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_scanned = 0;
		if (!priority)
			disable_swap_token();
		nr_reclaimed += shrink_zones(priority, zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		total_scanned += sc.nr_scanned;
		if (nr_reclaimed >= sc.swap_cluster_max) {
			ret = 1;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.  But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc.swap_cluster_max +
					sc.swap_cluster_max / 2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc.may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(WRITE, HZ/10);
	}
	/* top priority shrink_zones() still had more to do?  don't OOM, then */
	if (!sc.all_unreclaimable)
		ret = 1;
out:
	/*
	 * Now that we've scanned all the zones at this priority level, note
	 * that level within the zone so that the next thread which performs
	 * scanning of this zone will immediately start out at this priority
	 * level.  This affects only the decision whether or not to bring
	 * mapped pages onto the inactive list.
	 */
	if (priority < 0)
		priority = 0;
	for (i = 0; zones[i] != 0; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
			continue;

		zone->prev_priority = priority;
	}
	return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
	int all_zones_ok;
	int priority;
	int i;
	unsigned long total_scanned;
	unsigned long nr_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_swap = 1,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.swappiness = vm_swappiness,
	};
	/*
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to free_pages == pages_high.
	 */
	int temp_priority[MAX_NR_ZONES];

loop_again:
	total_scanned = 0;
	nr_reclaimed = 0;
	sc.may_writepage = !laptop_mode;
	count_vm_event(PAGEOUTRUN);

	for (i = 0; i < pgdat->nr_zones; i++)
		temp_priority[i] = DEF_PRIORITY;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		/*
		 * Scan in the highmem->dma direction for the highest
		 * zone which needs scanning
		 */
		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (!zone_watermark_ok(zone, order, zone->pages_high,
					       0, 0)) {
				end_zone = i;
				break;
			}
		}
		if (i < 0)
			goto out;

		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone_page_state(zone, NR_ACTIVE)
					+ zone_page_state(zone, NR_INACTIVE);
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (!zone_watermark_ok(zone, order, zone->pages_high,
					       end_zone, 0))
				all_zones_ok = 0;
			temp_priority[i] = priority;
			sc.nr_scanned = 0;
			note_zone_scanning_priority(zone, priority);
			nr_reclaimed += shrink_zone(priority, zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			nr_reclaimed += reclaim_state->reclaimed_slab;
			total_scanned += sc.nr_scanned;
			if (zone->all_unreclaimable)
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
				(zone_page_state(zone, NR_ACTIVE)
				+ zone_page_state(zone, NR_INACTIVE)) * 6)
					zone->all_unreclaimable = 1;
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > nr_reclaimed + nr_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators.  It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if (nr_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}
out:
	/*
	 * Note within each zone the priority level at which this zone was
	 * brought into a happy state.  So that the next thread which scans this
	 * zone will start out at that priority level.
	 */
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = temp_priority[i];
	}
	if (!all_zones_ok) {
		cond_resched();

		try_to_freeze();

		goto loop_again;
	}

	return nr_reclaimed;
}
1278
1279/*
1280 * The background pageout daemon, started as a kernel thread
1281 * from the init process.
1282 *
1283 * This basically trickles out pages so that we have _some_
1284 * free memory available even if there is no other activity
1285 * that frees anything up. This is needed for things like routing
1286 * etc, where we otherwise might have all activity going on in
1287 * asynchronous contexts that cannot page things out.
1288 *
1289 * If there are applications that are active memory-allocators
1290 * (most normal use), this basically shouldn't matter.
1291 */
1292static int kswapd(void *p)
1293{
1294 unsigned long order;
1295 pg_data_t *pgdat = (pg_data_t*)p;
1296 struct task_struct *tsk = current;
1297 DEFINE_WAIT(wait);
1298 struct reclaim_state reclaim_state = {
1299 .reclaimed_slab = 0,
1300 };
1301 cpumask_t cpumask;
1302
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 cpumask = node_to_cpumask(pgdat->node_id);
1304 if (!cpus_empty(cpumask))
1305 set_cpus_allowed(tsk, cpumask);
1306 current->reclaim_state = &reclaim_state;
1307
1308 /*
1309 * Tell the memory management that we're a "memory allocator",
1310 * and that if we need more memory we should get access to it
1311 * regardless (see "__alloc_pages()"). "kswapd" should
1312 * never get caught in the normal page freeing logic.
1313 *
1314 * (Kswapd normally doesn't need memory anyway, but sometimes
1315 * you need a small amount of memory in order to be able to
1316 * page out something else, and this flag essentially protects
1317 * us from recursively trying to free more memory as we're
1318 * trying to free the first piece of memory in the first place).
1319 */
Christoph Lameter930d9152006-01-08 01:00:47 -08001320 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321
1322 order = 0;
1323 for ( ; ; ) {
1324 unsigned long new_order;
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07001325
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1327 new_order = pgdat->kswapd_max_order;
1328 pgdat->kswapd_max_order = 0;
1329 if (order < new_order) {
1330 /*
1331 * Don't sleep if someone wants a larger 'order'
1332 * allocation
1333 */
1334 order = new_order;
1335 } else {
Rafael J. Wysockib1296cc2007-05-06 14:50:48 -07001336 if (!freezing(current))
1337 schedule();
1338
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 order = pgdat->kswapd_max_order;
1340 }
1341 finish_wait(&pgdat->kswapd_wait, &wait);
1342
Rafael J. Wysockib1296cc2007-05-06 14:50:48 -07001343 if (!try_to_freeze()) {
1344		/* We can speed up thawing tasks if we don't call
1345		 * balance_pgdat() after returning from the refrigerator.
1346		 */
1347 balance_pgdat(pgdat, order);
1348 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 }
1350 return 0;
1351}
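
/*
 * The sleep/wake handshake above pairs with wakeup_kswapd() below:
 * wakers record the largest failing allocation order in
 * pgdat->kswapd_max_order before waking kswapd_wait, and kswapd
 * re-reads that field both before sleeping (so a wakeup that arrives
 * between prepare_to_wait() and schedule() is not lost) and again after
 * waking, so it always balances at the largest order requested so far.
 */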
1352
1353/*
1354 * A zone is low on free memory, so wake its kswapd task to service it.
1355 */
1356void wakeup_kswapd(struct zone *zone, int order)
1357{
1358 pg_data_t *pgdat;
1359
Con Kolivasf3fe6512006-01-06 00:11:15 -08001360 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 return;
1362
1363 pgdat = zone->zone_pgdat;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08001364 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 return;
1366 if (pgdat->kswapd_max_order < order)
1367 pgdat->kswapd_max_order = order;
Paul Jackson02a0e532006-12-13 00:34:25 -08001368 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 return;
Con Kolivas8d0986e2005-09-13 01:25:07 -07001370 if (!waitqueue_active(&pgdat->kswapd_wait))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 return;
Con Kolivas8d0986e2005-09-13 01:25:07 -07001372 wake_up_interruptible(&pgdat->kswapd_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373}
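
/*
 * Illustrative caller (a sketch modelled on the page allocator's slow
 * path of this era, not code from this file): before falling back to
 * direct reclaim, the allocator kicks kswapd for every zone it may
 * allocate from, roughly
 *
 *	for (z = zonelist->zones; *z; z++)
 *		wakeup_kswapd(*z, order);
 */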
1374
1375#ifdef CONFIG_PM
1376/*
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001377 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
1378 * from LRU lists system-wide, for given pass and priority, and returns the
1378 * number of reclaimed pages.
1379 *
1380 * For pass > 3 we also try to shrink the LRU lists that contain only a few pages.
1382 */
Nigel Cunninghame07aa052006-12-22 01:07:21 -08001383static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
1384 int pass, struct scan_control *sc)
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001385{
1386 struct zone *zone;
1387 unsigned long nr_to_scan, ret = 0;
1388
1389 for_each_zone(zone) {
1390
1391 if (!populated_zone(zone))
1392 continue;
1393
1394 if (zone->all_unreclaimable && prio != DEF_PRIORITY)
1395 continue;
1396
1397 /* For pass = 0 we don't shrink the active list */
1398 if (pass > 0) {
Christoph Lameterc8785382007-02-10 01:43:01 -08001399 zone->nr_scan_active +=
1400 (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001401 if (zone->nr_scan_active >= nr_pages || pass > 3) {
1402 zone->nr_scan_active = 0;
Christoph Lameterc8785382007-02-10 01:43:01 -08001403 nr_to_scan = min(nr_pages,
1404 zone_page_state(zone, NR_ACTIVE));
Martin Blighbbdb3962006-10-28 10:38:25 -07001405 shrink_active_list(nr_to_scan, zone, sc, prio);
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001406 }
1407 }
1408
Christoph Lameterc8785382007-02-10 01:43:01 -08001409 zone->nr_scan_inactive +=
1410 (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001411 if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
1412 zone->nr_scan_inactive = 0;
Christoph Lameterc8785382007-02-10 01:43:01 -08001413 nr_to_scan = min(nr_pages,
1414 zone_page_state(zone, NR_INACTIVE));
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001415 ret += shrink_inactive_list(nr_to_scan, zone, sc);
1416 if (ret >= nr_pages)
1417 return ret;
1418 }
1419 }
1420
1421 return ret;
1422}
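
/*
 * Worked example of the scan batching above, assuming DEF_PRIORITY == 12
 * as elsewhere in this kernel: with zone_page_state(zone, NR_ACTIVE) ==
 * 1,000,000, a call at prio 12 adds (1000000 >> 12) + 1 == 245 pages to
 * nr_scan_active, while a call at prio 0 adds the full 1,000,001; each
 * lower priority therefore scans a geometrically larger slice of the LRU.
 */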
1423
Andrew Morton76395d32007-01-05 16:37:05 -08001424static unsigned long count_lru_pages(void)
1425{
Christoph Lameterc8785382007-02-10 01:43:01 -08001426 return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
Andrew Morton76395d32007-01-05 16:37:05 -08001427}
1428
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001429/*
1430 * Try to free `nr_pages' of memory, system-wide, and return the number of
1431 * freed pages.
1432 *
1433 * Rather than trying to age LRUs, the aim is to preserve the overall
1434 * LRU order by reclaiming preferentially:
1435 * inactive > active > active referenced > active mapped
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 */
Andrew Morton69e05942006-03-22 00:08:19 -08001437unsigned long shrink_all_memory(unsigned long nr_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438{
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001439 unsigned long lru_pages, nr_slab;
Andrew Morton69e05942006-03-22 00:08:19 -08001440 unsigned long ret = 0;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001441 int pass;
1442 struct reclaim_state reclaim_state;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001443 struct scan_control sc = {
1444 .gfp_mask = GFP_KERNEL,
1445 .may_swap = 0,
1446 .swap_cluster_max = nr_pages,
1447 .may_writepage = 1,
1448 .swappiness = vm_swappiness,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 };
1450
1451 current->reclaim_state = &reclaim_state;
Andrew Morton69e05942006-03-22 00:08:19 -08001452
Andrew Morton76395d32007-01-05 16:37:05 -08001453 lru_pages = count_lru_pages();
Christoph Lameter972d1a72006-09-25 23:31:51 -07001454 nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001455 /* If slab caches are huge, it's better to hit them first */
1456 while (nr_slab >= lru_pages) {
1457 reclaim_state.reclaimed_slab = 0;
1458 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
1459 if (!reclaim_state.reclaimed_slab)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 break;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001461
1462 ret += reclaim_state.reclaimed_slab;
1463 if (ret >= nr_pages)
1464 goto out;
1465
1466 nr_slab -= reclaim_state.reclaimed_slab;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 }
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001468
1469 /*
1470 * We try to shrink LRUs in 5 passes:
1471 * 0 = Reclaim from inactive_list only
1472 * 1 = Reclaim from active list but don't reclaim mapped
1473 * 2 = 2nd pass of type 1
1474 * 3 = Reclaim mapped (normal reclaim)
1475 * 4 = 2nd pass of type 3
1476 */
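	/*
	 * With DEF_PRIORITY == 12 this amounts to at most 5 * 13 == 65
	 * rounds of shrink_all_zones() and shrink_slab() before we give
	 * up and fall back to the slab-only loop below.
	 */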
1477 for (pass = 0; pass < 5; pass++) {
1478 int prio;
1479
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001480		/* Force reclaiming mapped pages in passes 3 and 4 */
1481 if (pass > 2) {
1482 sc.may_swap = 1;
1483 sc.swappiness = 100;
1484 }
1485
1486 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
1487 unsigned long nr_to_scan = nr_pages - ret;
1488
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001489 sc.nr_scanned = 0;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001490 ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
1491 if (ret >= nr_pages)
1492 goto out;
1493
1494 reclaim_state.reclaimed_slab = 0;
Andrew Morton76395d32007-01-05 16:37:05 -08001495 shrink_slab(sc.nr_scanned, sc.gfp_mask,
1496 count_lru_pages());
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001497 ret += reclaim_state.reclaimed_slab;
1498 if (ret >= nr_pages)
1499 goto out;
1500
1501 if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
Andrew Morton3fcfab12006-10-19 23:28:16 -07001502 congestion_wait(WRITE, HZ / 10);
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001503 }
Rafael J. Wysocki248a0302006-03-22 00:09:04 -08001504 }
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001505
1506 /*
1507	 * If ret == 0 we could not shrink the LRUs, but there may still be
1508	 * something reclaimable in the slab caches.
1509 */
Andrew Morton76395d32007-01-05 16:37:05 -08001510 if (!ret) {
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001511 do {
1512 reclaim_state.reclaimed_slab = 0;
Andrew Morton76395d32007-01-05 16:37:05 -08001513 shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001514 ret += reclaim_state.reclaimed_slab;
1515 } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
Andrew Morton76395d32007-01-05 16:37:05 -08001516 }
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001517
1518out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 current->reclaim_state = NULL;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001520
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 return ret;
1522}
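
/*
 * Typical caller (a hedged sketch of the swsusp/hibernation path that
 * frees memory for the suspend image; SHRINK_BITE is that caller's
 * constant, not something defined in this file):
 *
 *	long pages_freed = shrink_all_memory(SHRINK_BITE);
 *
 * repeated until enough memory is free or no further progress is made.
 */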
1523#endif
1524
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525/* It's optimal to keep kswapds on the same CPUs as their memory, but
1526 not required for correctness. So if the last cpu in a node goes
1527 away, we get changed to run anywhere: as the first one comes back,
1528 restore their cpu bindings. */
Chandra Seetharaman9c7b2162006-06-27 02:54:07 -07001529static int __devinit cpu_callback(struct notifier_block *nfb,
Andrew Morton69e05942006-03-22 00:08:19 -08001530 unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531{
1532 pg_data_t *pgdat;
1533 cpumask_t mask;
1534
1535 if (action == CPU_ONLINE) {
KAMEZAWA Hiroyukiec936fc2006-03-27 01:15:59 -08001536 for_each_online_pgdat(pgdat) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 mask = node_to_cpumask(pgdat->node_id);
1538 if (any_online_cpu(mask) != NR_CPUS)
1539 /* One of our CPUs online: restore mask */
1540 set_cpus_allowed(pgdat->kswapd, mask);
1541 }
1542 }
1543 return NOTIFY_OK;
1544}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545
Yasunori Goto3218ae12006-06-27 02:53:33 -07001546/*
1547 * This kswapd start function will be called by init and node-hot-add.
1548 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
1549 */
1550int kswapd_run(int nid)
1551{
1552 pg_data_t *pgdat = NODE_DATA(nid);
1553 int ret = 0;
1554
1555 if (pgdat->kswapd)
1556 return 0;
1557
1558 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
1559 if (IS_ERR(pgdat->kswapd)) {
1560 /* failure at boot is fatal */
1561 BUG_ON(system_state == SYSTEM_BOOTING);
1562		printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
1563 ret = -1;
1564 }
1565 return ret;
1566}
1567
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568static int __init kswapd_init(void)
1569{
Yasunori Goto3218ae12006-06-27 02:53:33 -07001570 int nid;
Andrew Morton69e05942006-03-22 00:08:19 -08001571
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 swap_setup();
Yasunori Goto3218ae12006-06-27 02:53:33 -07001573 for_each_online_node(nid)
1574 kswapd_run(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 hotcpu_notifier(cpu_callback, 0);
1576 return 0;
1577}
1578
1579module_init(kswapd_init)
Christoph Lameter9eeff232006-01-18 17:42:31 -08001580
1581#ifdef CONFIG_NUMA
1582/*
1583 * Zone reclaim mode
1584 *
1585 * If non-zero, call zone_reclaim() when the number of free pages falls below
1586 * the watermarks.
Christoph Lameter9eeff232006-01-18 17:42:31 -08001587 */
1588int zone_reclaim_mode __read_mostly;
1589
Christoph Lameter1b2ffb72006-02-01 03:05:34 -08001590#define RECLAIM_OFF 0
1591#define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */
1592#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
1593#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
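
/*
 * zone_reclaim_mode is a bitmask of the flags above, normally set via
 * /proc/sys/vm/zone_reclaim_mode; for example "echo 1" enables plain
 * zone reclaim (RECLAIM_ZONE), while "echo 7" additionally allows
 * writeout (RECLAIM_WRITE) and swap (RECLAIM_SWAP) during reclaim.
 */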
1594
Christoph Lameter9eeff232006-01-18 17:42:31 -08001595/*
Christoph Lametera92f7122006-02-01 03:05:32 -08001596 * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
1597 * pages considered for each zone_reclaim() call. A priority of 4 scans
1598 * 1/16th of a zone.
1599 */
1600#define ZONE_RECLAIM_PRIORITY 4
1601
Christoph Lameter9eeff232006-01-18 17:42:31 -08001602/*
Christoph Lameter96146342006-07-03 00:24:13 -07001603 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
1604 * occur.
1605 */
1606int sysctl_min_unmapped_ratio = 1;
1607
1608/*
Christoph Lameter0ff38492006-09-25 23:31:52 -07001609 * If the number of slab pages in a zone grows beyond this percentage then
1610 * slab reclaim needs to occur.
1611 */
1612int sysctl_min_slab_ratio = 5;
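
/*
 * Worked example of the two ratios above: in a zone of 262,144 pages
 * (1GB with 4KB pages), min_unmapped_ratio == 1 allows zone reclaim
 * only while more than 262144 * 1 / 100 == 2621 pages are unmapped
 * file-backed, and min_slab_ratio == 5 permits slab reclaim once
 * reclaimable slab exceeds 262144 * 5 / 100 == 13107 pages.
 */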
1613
1614/*
Christoph Lameter9eeff232006-01-18 17:42:31 -08001615 * Try to free up some pages from this zone through reclaim.
1616 */
Andrew Morton179e9632006-03-22 00:08:18 -08001617static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
Christoph Lameter9eeff232006-01-18 17:42:31 -08001618{
Christoph Lameter7fb2d462006-03-22 00:08:22 -08001619 /* Minimum pages needed in order to stay on node */
Andrew Morton69e05942006-03-22 00:08:19 -08001620 const unsigned long nr_pages = 1 << order;
Christoph Lameter9eeff232006-01-18 17:42:31 -08001621 struct task_struct *p = current;
1622 struct reclaim_state reclaim_state;
Christoph Lameter86959492006-03-22 00:08:18 -08001623 int priority;
Andrew Morton05ff5132006-03-22 00:08:20 -08001624 unsigned long nr_reclaimed = 0;
Andrew Morton179e9632006-03-22 00:08:18 -08001625 struct scan_control sc = {
1626 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1627 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
Andrew Morton69e05942006-03-22 00:08:19 -08001628 .swap_cluster_max = max_t(unsigned long, nr_pages,
1629 SWAP_CLUSTER_MAX),
Andrew Morton179e9632006-03-22 00:08:18 -08001630 .gfp_mask = gfp_mask,
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07001631 .swappiness = vm_swappiness,
Andrew Morton179e9632006-03-22 00:08:18 -08001632 };
Christoph Lameter83e33a42006-09-25 23:31:53 -07001633 unsigned long slab_reclaimable;
Christoph Lameter9eeff232006-01-18 17:42:31 -08001634
1635 disable_swap_token();
Christoph Lameter9eeff232006-01-18 17:42:31 -08001636 cond_resched();
Christoph Lameterd4f77962006-02-24 13:04:22 -08001637 /*
1638 * We need to be able to allocate from the reserves for RECLAIM_SWAP
1639 * and we also need to be able to write out pages for RECLAIM_WRITE
1640 * and RECLAIM_SWAP.
1641 */
1642 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
Christoph Lameter9eeff232006-01-18 17:42:31 -08001643 reclaim_state.reclaimed_slab = 0;
1644 p->reclaim_state = &reclaim_state;
Christoph Lameterc84db232006-02-01 03:05:29 -08001645
Christoph Lameter0ff38492006-09-25 23:31:52 -07001646 if (zone_page_state(zone, NR_FILE_PAGES) -
1647 zone_page_state(zone, NR_FILE_MAPPED) >
1648 zone->min_unmapped_pages) {
1649 /*
1650 * Free memory by calling shrink zone with increasing
1651 * priorities until we have enough memory freed.
1652 */
1653 priority = ZONE_RECLAIM_PRIORITY;
1654 do {
Martin Bligh3bb1a852006-10-28 10:38:24 -07001655 note_zone_scanning_priority(zone, priority);
Christoph Lameter0ff38492006-09-25 23:31:52 -07001656 nr_reclaimed += shrink_zone(priority, zone, &sc);
1657 priority--;
1658 } while (priority >= 0 && nr_reclaimed < nr_pages);
1659 }
Christoph Lameterc84db232006-02-01 03:05:29 -08001660
Christoph Lameter83e33a42006-09-25 23:31:53 -07001661 slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1662 if (slab_reclaimable > zone->min_slab_pages) {
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08001663 /*
Christoph Lameter7fb2d462006-03-22 00:08:22 -08001664 * shrink_slab() does not currently allow us to determine how
Christoph Lameter0ff38492006-09-25 23:31:52 -07001665 * many pages were freed in this zone. So we take the current
1666 * number of slab pages and shake the slab until it is reduced
1667 * by the same nr_pages that we used for reclaiming unmapped
1668 * pages.
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08001669 *
Christoph Lameter0ff38492006-09-25 23:31:52 -07001670 * Note that shrink_slab will free memory on all zones and may
1671 * take a long time.
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08001672 */
Christoph Lameter0ff38492006-09-25 23:31:52 -07001673 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
Christoph Lameter83e33a42006-09-25 23:31:53 -07001674 zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
1675 slab_reclaimable - nr_pages)
Christoph Lameter0ff38492006-09-25 23:31:52 -07001676 ;
Christoph Lameter83e33a42006-09-25 23:31:53 -07001677
1678 /*
1679 * Update nr_reclaimed by the number of slab pages we
1680 * reclaimed from this zone.
1681 */
1682 nr_reclaimed += slab_reclaimable -
1683 zone_page_state(zone, NR_SLAB_RECLAIMABLE);
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08001684 }
1685
Christoph Lameter9eeff232006-01-18 17:42:31 -08001686 p->reclaim_state = NULL;
Christoph Lameterd4f77962006-02-24 13:04:22 -08001687 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
Andrew Morton05ff5132006-03-22 00:08:20 -08001688 return nr_reclaimed >= nr_pages;
Christoph Lameter9eeff232006-01-18 17:42:31 -08001689}
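
/*
 * A note on sc.swap_cluster_max above: an order-0 request gives
 * nr_pages == 1 and order-3 gives 8, both below SWAP_CLUSTER_MAX (32 in
 * this kernel), so small allocations still reclaim in 32-page batches;
 * only orders above 5 (more than 32 pages) raise the batch size.
 */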
Andrew Morton179e9632006-03-22 00:08:18 -08001690
1691int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1692{
1693 cpumask_t mask;
1694 int node_id;
1695
1696 /*
Christoph Lameter0ff38492006-09-25 23:31:52 -07001697 * Zone reclaim reclaims unmapped file-backed pages and
1698 * slab pages if we are over the defined limits.
Christoph Lameter34aa1332006-06-30 01:55:37 -07001699 *
Christoph Lameter96146342006-07-03 00:24:13 -07001700 * A small portion of unmapped file-backed pages is needed for
1701 * file I/O; otherwise pages read by file I/O will be immediately
1702 * thrown out if the zone is overallocated. So we do not reclaim
1703 * if less than a specified percentage of the zone is used by
1704 * unmapped file-backed pages.
Andrew Morton179e9632006-03-22 00:08:18 -08001705 */
Christoph Lameter34aa1332006-06-30 01:55:37 -07001706 if (zone_page_state(zone, NR_FILE_PAGES) -
Christoph Lameter0ff38492006-09-25 23:31:52 -07001707 zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
1708 && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
1709 <= zone->min_slab_pages)
Christoph Lameter96146342006-07-03 00:24:13 -07001710 return 0;
Andrew Morton179e9632006-03-22 00:08:18 -08001711
1712 /*
1713 * Avoid concurrent zone reclaims; do not reclaim in a zone that does
1714 * not have reclaimable pages; and if we should not delay the allocation,
1715 * do not scan at all.
1716 */
1717 if (!(gfp_mask & __GFP_WAIT) ||
1718 zone->all_unreclaimable ||
1719 atomic_read(&zone->reclaim_in_progress) > 0 ||
1720 (current->flags & PF_MEMALLOC))
1721 return 0;
1722
1723 /*
1724 * Only run zone reclaim on the local zone or on zones that do not
1725 * have associated processors. This will favor the local processor
1726 * over remote processors and spread off-node memory allocations
1727 * as widely as possible.
1728 */
Christoph Lameter89fa3022006-09-25 23:31:55 -07001729 node_id = zone_to_nid(zone);
Andrew Morton179e9632006-03-22 00:08:18 -08001730 mask = node_to_cpumask(node_id);
1731 if (!cpus_empty(mask) && node_id != numa_node_id())
1732 return 0;
1733 return __zone_reclaim(zone, gfp_mask, order);
1734}
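
/*
 * Illustrative caller (a hedged paraphrase of the allocator's
 * get_page_from_freelist() of this era, not code from this file): when
 * a zone fails its watermark check the allocator may try
 *
 *	if (zone_reclaim_mode == 0 ||
 *	    !zone_reclaim(zone, gfp_mask, order))
 *		continue;
 *
 * skipping the zone unless zone_reclaim() reports that enough pages
 * were freed.
 */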
Christoph Lameter9eeff232006-01-18 17:42:31 -08001735#endif