/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	unsigned long nr_mapped;	/* From page_state */

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)		\
	do {							\
		if ((_page)->lru.prev != _base) {		\
			struct page *prev;			\
								\
			prev = lru_to_page(&(_page->lru));	\
			prefetch(&prev->_field);		\
		}						\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)		\
	do {							\
		if ((_page)->lru.prev != _base) {		\
			struct page *prev;			\
								\
			prev = lru_to_page(&(_page->lru));	\
			prefetchw(&prev->_field);		\
		}						\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU, it increases the pressure
 * on slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

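		/*
		 * A shrinker called with nr_to_scan == 0 only reports how
		 * many objects it could free, so max_pass is the current
		 * size of this cache.  The delta below asks the cache to
		 * scan a share of its objects proportional to the share of
		 * LRU pages we just scanned, weighted by how expensive its
		 * objects are to recreate (shrinker->seeks):
		 *
		 *	delta = 4 * scanned / seeks * max_pass / (lru_pages + 1)
		 */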
		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

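/*
 * A pagecache page is freeable when the only references left are the
 * pagecache itself and the caller who isolated it from the LRU (hence
 * page_count == 2), plus one extra reference held by buffers when
 * PagePrivate is set.
 */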
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

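/*
 * Writing back to a queue is allowed if the caller is a dedicated swap
 * writer (PF_SWAPWRITE), if the backing device is not write-congested,
 * or if the caller is already writing to this queue anyway
 * (current->backing_dev_info), in which case blocking on it is fine.
 */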
static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}

		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

static int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return 0;		/* truncate got there first */

	write_lock_irq(&mapping->tree_lock);

	/*
	 * The non-racy check for busy page.  It is critical to check
	 * PageDirty _after_ making sure that the page is freeable and
	 * not in use by anybody.	(pagecache + us == 2)
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!sc->may_swap)
				goto keep_locked;
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
		}
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
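		/*
		 * Writing the page back via ->writepage may recurse into the
		 * filesystem, so it is only allowed if the allocation that
		 * got us here permits it: __GFP_FS in general, or __GFP_IO
		 * for swapcache pages, which only need the block layer.
		 */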
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			/*
			 * No unmapping if we do not swap
			 */
			if (!sc->may_swap)
				goto keep_locked;

			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch(pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	return nr_reclaimed;
}
578
Christoph Lameter7cbe34c2006-01-08 01:00:49 -0800579#ifdef CONFIG_MIGRATION
Christoph Lameter8419c312006-01-08 01:00:52 -0800580static inline void move_to_lru(struct page *page)
581{
582 list_del(&page->lru);
583 if (PageActive(page)) {
584 /*
585 * lru_cache_add_active checks that
586 * the PG_active bit is off.
587 */
588 ClearPageActive(page);
589 lru_cache_add_active(page);
590 } else {
591 lru_cache_add(page);
592 }
593 put_page(page);
594}
595
596/*
Nick Piggin053837f2006-01-18 17:42:27 -0800597 * Add isolated pages on the list back to the LRU.
Christoph Lameter8419c312006-01-08 01:00:52 -0800598 *
599 * returns the number of pages put back.
600 */
Andrew Morton69e05942006-03-22 00:08:19 -0800601unsigned long putback_lru_pages(struct list_head *l)
Christoph Lameter8419c312006-01-08 01:00:52 -0800602{
603 struct page *page;
604 struct page *page2;
Andrew Morton69e05942006-03-22 00:08:19 -0800605 unsigned long count = 0;
Christoph Lameter8419c312006-01-08 01:00:52 -0800606
607 list_for_each_entry_safe(page, page2, l, lru) {
608 move_to_lru(page);
609 count++;
610 }
611 return count;
612}
613
Linus Torvalds1da177e2005-04-16 15:20:36 -0700614/*
Christoph Lametere965f962006-02-01 03:05:41 -0800615 * Non migratable page
616 */
617int fail_migrate_page(struct page *newpage, struct page *page)
618{
619 return -EIO;
620}
621EXPORT_SYMBOL(fail_migrate_page);
622
623/*
Christoph Lameter49d2e9c2006-01-08 01:00:48 -0800624 * swapout a single page
625 * page is locked upon entry, unlocked on exit
Christoph Lameter49d2e9c2006-01-08 01:00:48 -0800626 */
627static int swap_page(struct page *page)
628{
629 struct address_space *mapping = page_mapping(page);
630
631 if (page_mapped(page) && mapping)
Christoph Lameter418aade2006-02-10 01:51:15 -0800632 if (try_to_unmap(page, 1) != SWAP_SUCCESS)
Christoph Lameter49d2e9c2006-01-08 01:00:48 -0800633 goto unlock_retry;
634
635 if (PageDirty(page)) {
636 /* Page is dirty, try to write it out here */
637 switch(pageout(page, mapping)) {
638 case PAGE_KEEP:
639 case PAGE_ACTIVATE:
640 goto unlock_retry;
641
642 case PAGE_SUCCESS:
643 goto retry;
644
645 case PAGE_CLEAN:
646 ; /* try to free the page below */
647 }
648 }
649
650 if (PagePrivate(page)) {
651 if (!try_to_release_page(page, GFP_KERNEL) ||
652 (!mapping && page_count(page) == 1))
653 goto unlock_retry;
654 }
655
656 if (remove_mapping(mapping, page)) {
657 /* Success */
658 unlock_page(page);
659 return 0;
660 }
661
662unlock_retry:
663 unlock_page(page);
664
665retry:
Christoph Lameterd0d96322006-01-08 01:00:55 -0800666 return -EAGAIN;
Christoph Lameter49d2e9c2006-01-08 01:00:48 -0800667}
Christoph Lametere965f962006-02-01 03:05:41 -0800668EXPORT_SYMBOL(swap_page);
Christoph Lametera48d07a2006-02-01 03:05:38 -0800669
670/*
671 * Page migration was first developed in the context of the memory hotplug
672 * project. The main authors of the migration code are:
673 *
674 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
675 * Hirokazu Takahashi <taka@valinux.co.jp>
676 * Dave Hansen <haveblue@us.ibm.com>
677 * Christoph Lameter <clameter@sgi.com>
678 */
679
680/*
681 * Remove references for a page and establish the new page with the correct
682 * basic settings to be able to stop accesses to the page.
683 */
Christoph Lametere965f962006-02-01 03:05:41 -0800684int migrate_page_remove_references(struct page *newpage,
Christoph Lametera48d07a2006-02-01 03:05:38 -0800685 struct page *page, int nr_refs)
686{
687 struct address_space *mapping = page_mapping(page);
688 struct page **radix_pointer;
689
690 /*
691 * Avoid doing any of the following work if the page count
692 * indicates that the page is in use or truncate has removed
693 * the page.
694 */
695 if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
Christoph Lameter4983da02006-03-14 19:50:19 -0800696 return -EAGAIN;
Christoph Lametera48d07a2006-02-01 03:05:38 -0800697
698 /*
699 * Establish swap ptes for anonymous pages or destroy pte
700 * maps for files.
701 *
702 * In order to reestablish file backed mappings the fault handlers
703 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> Permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_remove_references(newpage, page, 2);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the 'to' list has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
unsigned long migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	unsigned long retry;
	unsigned long nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

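	/*
	 * PF_SWAPWRITE lets may_write_to_queue() approve writeback from
	 * pageout() even when the backing device is congested, so migration
	 * is not throttled the way normal reclaim is.
	 */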
	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers..... As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}

/*
 * Isolate one page from the LRU lists, taking an elevated reference
 * to it.  The caller is responsible for returning it to an LRU later
 * (see putback_lru_pages()).
 *
 * Result:
 *  0 = page not on LRU list
 *  1 = page removed from LRU list with elevated refcount.
 */
int isolate_lru_page(struct page *page)
{
	int ret = 0;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 1;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	return ret;
}
#endif

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct list_head *target;
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		BUG_ON(!PageLRU(page));

		list_del(&page->lru);
		target = src;
		if (likely(get_page_unless_zero(page))) {
			/*
			 * Be careful not to clear PageLRU until after we're
			 * sure the page is not being freed elsewhere -- the
			 * page release code relies on it.
			 */
			ClearPageLRU(page);
			target = dst;
			nr_taken++;
		} /* else it is being freed elsewhere */

		list_add(&page->lru, target);
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
				struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
		struct page *page;
		unsigned long nr_taken;
		unsigned long nr_scan;
		unsigned long nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		nr_scanned += nr_scan;
		nr_freed = shrink_page_list(&page_list, sc);
		nr_reclaimed += nr_freed;
		local_irq_disable();
		if (current_is_kswapd()) {
			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
			__mod_page_state(kswapd_steal, nr_freed);
		} else
			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
		__mod_page_state_zone(zone, pgsteal, nr_freed);

		if (nr_taken == 0)
			goto done;

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			BUG_ON(PageLRU(page));
			SetPageLRU(page);
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	} while (nr_scanned < max_scan);
	spin_unlock(&zone->lru_lock);
done:
	local_irq_enable();
	pagevec_release(&pvec);
	return nr_reclaimed;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long pgmoved;
	int pgdeactivate = 0;
	unsigned long pgscanned;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;

	if (unlikely(sc->may_swap)) {
		long mapped_ratio;
		long distress;
		long swap_tendency;

		/*
		 * `distress' is a measure of how much trouble we're having
		 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
		 */
		distress = 100 >> zone->prev_priority;

		/*
		 * The point of this algorithm is to decide when to start
		 * reclaiming mapped memory instead of just pagecache. Work
		 * out how much memory is mapped.
		 */
		mapped_ratio = (sc->nr_mapped * 100) / total_memory;

		/*
		 * Now decide how much we really want to unmap some pages.  The
		 * mapped ratio is downgraded - just because there's a lot of
		 * mapped memory doesn't necessarily mean that page reclaim
		 * isn't succeeding.
		 *
		 * The distress ratio is important - we don't want to start
		 * going oom.
		 *
		 * A 100% value of vm_swappiness overrides this algorithm
		 * altogether.
		 */
		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

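		/*
		 * With the default vm_swappiness of 60 and distress == 0
		 * (no reclaim trouble), swap_tendency reaches 100 once
		 * mapped_ratio hits 80, i.e. mapped pages only start being
		 * deactivated when about 80% of memory is mapped.
		 */
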
		/*
		 * Now use this metric to decide whether to start moving mapped
		 * memory onto the inactive list.
		 */
		if (swap_tendency >= 100)
			reclaim_mapped = 1;
	}

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		BUG_ON(!PageActive(page));
		ClearPageActive(page);

		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock(&zone->lru_lock);

	__mod_page_state_zone(zone, pgrefill, pgscanned);
	__mod_page_state(pgdeactivate, pgdeactivate);
	local_irq_enable();

	pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static unsigned long shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;
	unsigned long nr_to_scan;
	unsigned long nr_reclaimed = 0;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 while (nr_active || nr_inactive) {
1361 if (nr_active) {
Christoph Lameter86959492006-03-22 00:08:18 -08001362 nr_to_scan = min(nr_active,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 (unsigned long)sc->swap_cluster_max);
Christoph Lameter86959492006-03-22 00:08:18 -08001364 nr_active -= nr_to_scan;
Andrew Morton1742f192006-03-22 00:08:21 -08001365 shrink_active_list(nr_to_scan, zone, sc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 }
1367
1368 if (nr_inactive) {
Christoph Lameter86959492006-03-22 00:08:18 -08001369 nr_to_scan = min(nr_inactive,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 (unsigned long)sc->swap_cluster_max);
Christoph Lameter86959492006-03-22 00:08:18 -08001371 nr_inactive -= nr_to_scan;
Andrew Morton1742f192006-03-22 00:08:21 -08001372 nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
1373 sc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 }
1375 }
1376
1377 throttle_vm_writeout();
Martin Hicks53e9a612005-09-03 15:54:51 -07001378
1379 atomic_dec(&zone->reclaim_in_progress);
Andrew Morton05ff5132006-03-22 00:08:20 -08001380 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381}
1382
1383/*
1384 * This is the direct reclaim path, for page-allocating processes. We only
1385 * try to reclaim pages from zones which will satisfy the caller's allocation
1386 * request.
1387 *
1388 * We reclaim from a zone even if that zone is over pages_high. Because:
1389 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1390 * allocation or
1391 * b) The zones may be over pages_high but they must go *over* pages_high to
1392 * satisfy the `incremental min' zone defense algorithm.
1393 *
1394 * Returns the number of reclaimed pages.
1395 *
1396 * If a zone is deemed to be full of pinned pages then just give it a light
1397 * scan then give up on it.
1398 */
Andrew Morton1742f192006-03-22 00:08:21 -08001399static unsigned long shrink_zones(int priority, struct zone **zones,
Andrew Morton05ff5132006-03-22 00:08:20 -08001400 struct scan_control *sc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401{
Andrew Morton05ff5132006-03-22 00:08:20 -08001402 unsigned long nr_reclaimed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 int i;
1404
1405 for (i = 0; zones[i] != NULL; i++) {
1406 struct zone *zone = zones[i];
1407
Con Kolivasf3fe6512006-01-06 00:11:15 -08001408 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 continue;
1410
Paul Jackson9bf22292005-09-06 15:18:12 -07001411 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 continue;
1413
Christoph Lameter86959492006-03-22 00:08:18 -08001414 zone->temp_priority = priority;
1415 if (zone->prev_priority > priority)
1416 zone->prev_priority = priority;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
Christoph Lameter86959492006-03-22 00:08:18 -08001418 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 continue; /* Let kswapd poll it */
1420
Andrew Morton05ff5132006-03-22 00:08:20 -08001421 nr_reclaimed += shrink_zone(priority, zone, sc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 }
Andrew Morton05ff5132006-03-22 00:08:20 -08001423 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424}
1425
1426/*
1427 * This is the main entry point to direct page reclaim.
1428 *
1429 * If a full scan of the inactive list fails to free enough memory then we
1430 * are "out of memory" and something needs to be killed.
1431 *
1432 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1433 * high - the zone may be full of dirty or under-writeback pages, which this
1434 * caller can't do much about. We kick pdflush and take explicit naps in the
1435 * hope that some of these pages can be written. But if the allocating task
1436 * holds filesystem locks which prevent writeout this might not work, and the
1437 * allocation attempt will fail.
1438 */
Andrew Morton69e05942006-03-22 00:08:19 -08001439unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440{
1441 int priority;
1442 int ret = 0;
Andrew Morton69e05942006-03-22 00:08:19 -08001443 unsigned long total_scanned = 0;
Andrew Morton05ff5132006-03-22 00:08:20 -08001444 unsigned long nr_reclaimed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 struct reclaim_state *reclaim_state = current->reclaim_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 unsigned long lru_pages = 0;
1447 int i;
Andrew Morton179e9632006-03-22 00:08:18 -08001448 struct scan_control sc = {
1449 .gfp_mask = gfp_mask,
1450 .may_writepage = !laptop_mode,
1451 .swap_cluster_max = SWAP_CLUSTER_MAX,
1452 .may_swap = 1,
1453 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
1455 inc_page_state(allocstall);
1456
1457 for (i = 0; zones[i] != NULL; i++) {
1458 struct zone *zone = zones[i];
1459
Paul Jackson9bf22292005-09-06 15:18:12 -07001460 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 continue;
1462
1463 zone->temp_priority = DEF_PRIORITY;
1464 lru_pages += zone->nr_active + zone->nr_inactive;
1465 }
1466
1467 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1468 sc.nr_mapped = read_page_state(nr_mapped);
1469 sc.nr_scanned = 0;
Rik van Rielf7b7fd82005-11-28 13:44:07 -08001470 if (!priority)
1471 disable_swap_token();
Andrew Morton1742f192006-03-22 00:08:21 -08001472 nr_reclaimed += shrink_zones(priority, zones, &sc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
1474 if (reclaim_state) {
Andrew Morton05ff5132006-03-22 00:08:20 -08001475 nr_reclaimed += reclaim_state->reclaimed_slab;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 reclaim_state->reclaimed_slab = 0;
1477 }
1478 total_scanned += sc.nr_scanned;
Andrew Morton05ff5132006-03-22 00:08:20 -08001479 if (nr_reclaimed >= sc.swap_cluster_max) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 ret = 1;
1481 goto out;
1482 }
1483
1484 /*
1485 * Try to write back as many pages as we just scanned. This
1486 * tends to cause slow streaming writers to write data to the
1487 * disk smoothly, at the dirtying rate, which is nice. But
1488 * that's undesirable in laptop mode, where we *want* lumpy
1489 * writeout. So in laptop mode, write out the whole world.
1490 */
Andrew Morton179e9632006-03-22 00:08:18 -08001491 if (total_scanned > sc.swap_cluster_max +
1492 sc.swap_cluster_max / 2) {
Pekka J Enberg687a21c2005-06-28 20:44:55 -07001493 wakeup_pdflush(laptop_mode ? 0 : total_scanned);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 sc.may_writepage = 1;
1495 }
1496
1497 /* Take a nap, wait for some writeback to complete */
1498 if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
1499 blk_congestion_wait(WRITE, HZ/10);
1500 }
1501out:
 1502	for (i = 0; zones[i] != NULL; i++) {
1503 struct zone *zone = zones[i];
1504
Paul Jackson9bf22292005-09-06 15:18:12 -07001505 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 continue;
1507
1508 zone->prev_priority = zone->temp_priority;
1509 }
1510 return ret;
1511}
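
/*
 * Illustrative sketch (editor's addition, not part of vmscan.c): the shape of
 * the direct-reclaim loop above, in plain C with stand-in names.  The
 * reclaim_pass callback is a hypothetical placeholder for one
 * shrink_zones() + shrink_slab() pass; the real loop also wakes pdflush and
 * naps on congestion, which this sketch leaves out.
 */
#define SKETCH_DEF_PRIORITY	12	/* stand-in for DEF_PRIORITY */
#define SKETCH_CLUSTER_MAX	32	/* stand-in for SWAP_CLUSTER_MAX */

static int sketch_try_to_free_pages(unsigned long (*reclaim_pass)(int priority))
{
	unsigned long nr_reclaimed = 0;
	int priority;

	/* Scan gently first, then with more pressure as the priority drops. */
	for (priority = SKETCH_DEF_PRIORITY; priority >= 0; priority--) {
		nr_reclaimed += reclaim_pass(priority);
		/* Freeing one allocation cluster is enough for the caller. */
		if (nr_reclaimed >= SKETCH_CLUSTER_MAX)
			return 1;
	}
	return 0;	/* failure: the caller may fall back to the OOM killer */
}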
1512
1513/*
1514 * For kswapd, balance_pgdat() will work across all this node's zones until
1515 * they are all at pages_high.
1516 *
1517 * If `nr_pages' is non-zero then it is the number of pages which are to be
1518 * reclaimed, regardless of the zone occupancies. This is a software suspend
1519 * special.
1520 *
1521 * Returns the number of pages which were actually freed.
1522 *
1523 * There is special handling here for zones which are full of pinned pages.
1524 * This can happen if the pages are all mlocked, or if they are all used by
1525 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
1526 * What we do is to detect the case where all pages in the zone have been
1527 * scanned twice and there has been zero successful reclaim. Mark the zone as
1528 * dead and from now on, only perform a short scan. Basically we're polling
1529 * the zone for when the problem goes away.
1530 *
1531 * kswapd scans the zones in the highmem->normal->dma direction. It skips
1532 * zones which have free_pages > pages_high, but once a zone is found to have
1533 * free_pages <= pages_high, we scan that zone and the lower zones regardless
1534 * of the number of free pages in the lower zones. This interoperates with
1535 * the page allocator fallback scheme to ensure that aging of pages is balanced
1536 * across the zones.
1537 */
Andrew Morton69e05942006-03-22 00:08:19 -08001538static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
1539 int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540{
Andrew Morton69e05942006-03-22 00:08:19 -08001541 unsigned long to_free = nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 int all_zones_ok;
1543 int priority;
1544 int i;
Andrew Morton69e05942006-03-22 00:08:19 -08001545 unsigned long total_scanned;
Andrew Morton05ff5132006-03-22 00:08:20 -08001546 unsigned long nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 struct reclaim_state *reclaim_state = current->reclaim_state;
Andrew Morton179e9632006-03-22 00:08:18 -08001548 struct scan_control sc = {
1549 .gfp_mask = GFP_KERNEL,
1550 .may_swap = 1,
1551 .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX,
1552 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
1554loop_again:
1555 total_scanned = 0;
Andrew Morton05ff5132006-03-22 00:08:20 -08001556 nr_reclaimed = 0;
Andrew Morton179e9632006-03-22 00:08:18 -08001557	sc.may_writepage = !laptop_mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 sc.nr_mapped = read_page_state(nr_mapped);
1559
1560 inc_page_state(pageoutrun);
1561
1562 for (i = 0; i < pgdat->nr_zones; i++) {
1563 struct zone *zone = pgdat->node_zones + i;
1564
1565 zone->temp_priority = DEF_PRIORITY;
1566 }
1567
1568 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1569 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
1570 unsigned long lru_pages = 0;
1571
Rik van Rielf7b7fd82005-11-28 13:44:07 -08001572 /* The swap token gets in the way of swapout... */
1573 if (!priority)
1574 disable_swap_token();
1575
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 all_zones_ok = 1;
1577
1578 if (nr_pages == 0) {
1579 /*
1580 * Scan in the highmem->dma direction for the highest
1581 * zone which needs scanning
1582 */
1583 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1584 struct zone *zone = pgdat->node_zones + i;
1585
Con Kolivasf3fe6512006-01-06 00:11:15 -08001586 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 continue;
1588
1589 if (zone->all_unreclaimable &&
1590 priority != DEF_PRIORITY)
1591 continue;
1592
1593 if (!zone_watermark_ok(zone, order,
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08001594 zone->pages_high, 0, 0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 end_zone = i;
1596 goto scan;
1597 }
1598 }
1599 goto out;
1600 } else {
1601 end_zone = pgdat->nr_zones - 1;
1602 }
1603scan:
1604 for (i = 0; i <= end_zone; i++) {
1605 struct zone *zone = pgdat->node_zones + i;
1606
1607 lru_pages += zone->nr_active + zone->nr_inactive;
1608 }
1609
1610 /*
1611 * Now scan the zone in the dma->highmem direction, stopping
1612 * at the last zone which needs scanning.
1613 *
1614 * We do this because the page allocator works in the opposite
1615 * direction. This prevents the page allocator from allocating
1616 * pages behind kswapd's direction of progress, which would
1617 * cause too much scanning of the lower zones.
1618 */
1619 for (i = 0; i <= end_zone; i++) {
1620 struct zone *zone = pgdat->node_zones + i;
akpm@osdl.orgb15e0902005-06-21 17:14:35 -07001621 int nr_slab;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Con Kolivasf3fe6512006-01-06 00:11:15 -08001623 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 continue;
1625
1626 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1627 continue;
1628
1629 if (nr_pages == 0) { /* Not software suspend */
1630 if (!zone_watermark_ok(zone, order,
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08001631 zone->pages_high, end_zone, 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 all_zones_ok = 0;
1633 }
1634 zone->temp_priority = priority;
1635 if (zone->prev_priority > priority)
1636 zone->prev_priority = priority;
1637 sc.nr_scanned = 0;
Andrew Morton05ff5132006-03-22 00:08:20 -08001638 nr_reclaimed += shrink_zone(priority, zone, &sc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 reclaim_state->reclaimed_slab = 0;
akpm@osdl.orgb15e0902005-06-21 17:14:35 -07001640 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1641 lru_pages);
Andrew Morton05ff5132006-03-22 00:08:20 -08001642 nr_reclaimed += reclaim_state->reclaimed_slab;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 total_scanned += sc.nr_scanned;
1644 if (zone->all_unreclaimable)
1645 continue;
akpm@osdl.orgb15e0902005-06-21 17:14:35 -07001646 if (nr_slab == 0 && zone->pages_scanned >=
1647 (zone->nr_active + zone->nr_inactive) * 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 zone->all_unreclaimable = 1;
1649 /*
1650 * If we've done a decent amount of scanning and
1651 * the reclaim ratio is low, start doing writepage
1652 * even in laptop mode
1653 */
1654 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
Andrew Morton05ff5132006-03-22 00:08:20 -08001655 total_scanned > nr_reclaimed + nr_reclaimed / 2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 sc.may_writepage = 1;
1657 }
Andrew Morton05ff5132006-03-22 00:08:20 -08001658 if (nr_pages && to_free > nr_reclaimed)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 continue; /* swsusp: need to do more work */
1660 if (all_zones_ok)
1661 break; /* kswapd: all done */
1662 /*
1663 * OK, kswapd is getting into trouble. Take a nap, then take
1664 * another pass across the zones.
1665 */
1666 if (total_scanned && priority < DEF_PRIORITY - 2)
1667 blk_congestion_wait(WRITE, HZ/10);
1668
1669 /*
1670 * We do this so kswapd doesn't build up large priorities for
1671 * example when it is freeing in parallel with allocators. It
1672 * matches the direct reclaim path behaviour in terms of impact
1673 * on zone->*_priority.
1674 */
Andrew Morton05ff5132006-03-22 00:08:20 -08001675 if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 break;
1677 }
1678out:
1679 for (i = 0; i < pgdat->nr_zones; i++) {
1680 struct zone *zone = pgdat->node_zones + i;
1681
1682 zone->prev_priority = zone->temp_priority;
1683 }
1684 if (!all_zones_ok) {
1685 cond_resched();
1686 goto loop_again;
1687 }
1688
Andrew Morton05ff5132006-03-22 00:08:20 -08001689 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690}
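
/*
 * Illustrative sketch (editor's addition, not part of vmscan.c): how
 * balance_pgdat() picks its working set of zones.  Zones are indexed from
 * low (DMA) to high (HIGHMEM).  First walk down from the top to find the
 * highest zone that is below its high watermark, then reclaim upwards from
 * zone 0 to that zone, so kswapd ages the lower zones at least as hard as
 * the allocator's fallback order uses them.  The struct and helpers below
 * are hypothetical stand-ins, not kernel types.
 */
struct sketch_zone {
	unsigned long free_pages;
	unsigned long pages_high;
};

static int sketch_zone_below_high_wmark(const struct sketch_zone *z)
{
	return z->free_pages <= z->pages_high;
}

/* Returns the inclusive index of the last zone to scan, or -1 if all are OK. */
static int sketch_pick_end_zone(const struct sketch_zone *zones, int nr_zones)
{
	int i;

	for (i = nr_zones - 1; i >= 0; i--)
		if (sketch_zone_below_high_wmark(&zones[i]))
			return i;	/* scan zones 0..i, lower zones included */
	return -1;			/* every zone is above pages_high */
}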
1691
1692/*
1693 * The background pageout daemon, started as a kernel thread
1694 * from the init process.
1695 *
1696 * This basically trickles out pages so that we have _some_
1697 * free memory available even if there is no other activity
1698 * that frees anything up. This is needed for things like routing
1699 * etc, where we otherwise might have all activity going on in
1700 * asynchronous contexts that cannot page things out.
1701 *
1702 * If there are applications that are active memory-allocators
1703 * (most normal use), this basically shouldn't matter.
1704 */
1705static int kswapd(void *p)
1706{
1707 unsigned long order;
1708 pg_data_t *pgdat = (pg_data_t*)p;
1709 struct task_struct *tsk = current;
1710 DEFINE_WAIT(wait);
1711 struct reclaim_state reclaim_state = {
1712 .reclaimed_slab = 0,
1713 };
1714 cpumask_t cpumask;
1715
1716 daemonize("kswapd%d", pgdat->node_id);
1717 cpumask = node_to_cpumask(pgdat->node_id);
1718 if (!cpus_empty(cpumask))
1719 set_cpus_allowed(tsk, cpumask);
1720 current->reclaim_state = &reclaim_state;
1721
1722 /*
1723 * Tell the memory management that we're a "memory allocator",
1724 * and that if we need more memory we should get access to it
1725 * regardless (see "__alloc_pages()"). "kswapd" should
1726 * never get caught in the normal page freeing logic.
1727 *
1728 * (Kswapd normally doesn't need memory anyway, but sometimes
1729 * you need a small amount of memory in order to be able to
1730 * page out something else, and this flag essentially protects
1731 * us from recursively trying to free more memory as we're
1732 * trying to free the first piece of memory in the first place).
1733 */
Christoph Lameter930d9152006-01-08 01:00:47 -08001734 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
1736 order = 0;
1737 for ( ; ; ) {
1738 unsigned long new_order;
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07001739
1740 try_to_freeze();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741
1742 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1743 new_order = pgdat->kswapd_max_order;
1744 pgdat->kswapd_max_order = 0;
1745 if (order < new_order) {
1746 /*
1747 * Don't sleep if someone wants a larger 'order'
1748 * allocation
1749 */
1750 order = new_order;
1751 } else {
1752 schedule();
1753 order = pgdat->kswapd_max_order;
1754 }
1755 finish_wait(&pgdat->kswapd_wait, &wait);
1756
1757 balance_pgdat(pgdat, 0, order);
1758 }
1759 return 0;
1760}
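
/*
 * Illustrative sketch (editor's addition, not part of vmscan.c): the decision
 * kswapd makes before sleeping.  If a waker recorded a larger 'order' than
 * the one just balanced for, skip the sleep and service the bigger request
 * right away; otherwise sleep and pick up whatever order is pending on
 * wakeup.  The struct and the sleep callback are hypothetical stand-ins.
 */
struct sketch_pgdat {
	unsigned long kswapd_max_order;
};

static unsigned long sketch_next_order(struct sketch_pgdat *pgdat,
				       unsigned long cur_order,
				       void (*sleep)(void))
{
	unsigned long new_order = pgdat->kswapd_max_order;

	pgdat->kswapd_max_order = 0;
	if (cur_order < new_order)
		return new_order;	/* larger request pending: don't sleep */

	sleep();			/* wait for the next wakeup_kswapd() */
	return pgdat->kswapd_max_order;
}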
1761
1762/*
1763 * A zone is low on free memory, so wake its kswapd task to service it.
1764 */
1765void wakeup_kswapd(struct zone *zone, int order)
1766{
1767 pg_data_t *pgdat;
1768
Con Kolivasf3fe6512006-01-06 00:11:15 -08001769 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 return;
1771
1772 pgdat = zone->zone_pgdat;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08001773 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 return;
1775 if (pgdat->kswapd_max_order < order)
1776 pgdat->kswapd_max_order = order;
Paul Jackson9bf22292005-09-06 15:18:12 -07001777 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 return;
Con Kolivas8d0986e2005-09-13 01:25:07 -07001779 if (!waitqueue_active(&pgdat->kswapd_wait))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 return;
Con Kolivas8d0986e2005-09-13 01:25:07 -07001781 wake_up_interruptible(&pgdat->kswapd_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782}
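
/*
 * Illustrative usage sketch (editor's addition, not part of vmscan.c): a
 * caller such as the page allocator's slow path pokes kswapd for every zone
 * it may fall back to before it resorts to direct reclaim.  This simplified
 * zonelist walk is a hypothetical stand-in for the real allocator code.
 */
static void sketch_kick_kswapd(struct zone **zonelist, int order)
{
	struct zone **z;

	for (z = zonelist; *z != NULL; z++)
		wakeup_kswapd(*z, order); /* returns early if above pages_low */
}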
1783
1784#ifdef CONFIG_PM
1785/*
1786 * Try to free `nr_pages' of memory, system-wide. Returns the number of freed
1787 * pages.
1788 */
Andrew Morton69e05942006-03-22 00:08:19 -08001789unsigned long shrink_all_memory(unsigned long nr_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790{
1791 pg_data_t *pgdat;
Andrew Morton69e05942006-03-22 00:08:19 -08001792 unsigned long nr_to_free = nr_pages;
1793 unsigned long ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 struct reclaim_state reclaim_state = {
1795 .reclaimed_slab = 0,
1796 };
1797
1798 current->reclaim_state = &reclaim_state;
1799 for_each_pgdat(pgdat) {
Andrew Morton69e05942006-03-22 00:08:19 -08001800 unsigned long freed;
1801
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 freed = balance_pgdat(pgdat, nr_to_free, 0);
1803 ret += freed;
1804 nr_to_free -= freed;
Andrew Morton69e05942006-03-22 00:08:19 -08001805 if ((long)nr_to_free <= 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 break;
1807 }
1808 current->reclaim_state = NULL;
1809 return ret;
1810}
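
/*
 * Illustrative usage sketch (editor's addition, not part of vmscan.c): the
 * software-suspend path needs a chunk of free memory for its image and asks
 * for it system-wide, retrying while progress is still being made.  The loop
 * structure below is a hypothetical simplification, not the real swsusp code.
 */
static int sketch_free_for_suspend(unsigned long pages_needed)
{
	unsigned long freed = 0;
	unsigned long this_pass;

	do {
		this_pass = shrink_all_memory(pages_needed - freed);
		freed += this_pass;
	} while (this_pass && freed < pages_needed);

	return freed >= pages_needed ? 0 : -1;	/* -1: could not free enough */
}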
1811#endif
1812
1813#ifdef CONFIG_HOTPLUG_CPU
1814/* It's optimal to keep kswapds on the same CPUs as their memory, but
1815 not required for correctness. So if the last cpu in a node goes
1816 away, we get changed to run anywhere: as the first one comes back,
1817 restore their cpu bindings. */
1818static int __devinit cpu_callback(struct notifier_block *nfb,
Andrew Morton69e05942006-03-22 00:08:19 -08001819 unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820{
1821 pg_data_t *pgdat;
1822 cpumask_t mask;
1823
1824 if (action == CPU_ONLINE) {
1825 for_each_pgdat(pgdat) {
1826 mask = node_to_cpumask(pgdat->node_id);
1827 if (any_online_cpu(mask) != NR_CPUS)
1828 /* One of our CPUs online: restore mask */
1829 set_cpus_allowed(pgdat->kswapd, mask);
1830 }
1831 }
1832 return NOTIFY_OK;
1833}
1834#endif /* CONFIG_HOTPLUG_CPU */
1835
1836static int __init kswapd_init(void)
1837{
1838 pg_data_t *pgdat;
Andrew Morton69e05942006-03-22 00:08:19 -08001839
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 swap_setup();
Andrew Morton69e05942006-03-22 00:08:19 -08001841 for_each_pgdat(pgdat) {
1842 pid_t pid;
1843
1844 pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
1845 BUG_ON(pid < 0);
1846 pgdat->kswapd = find_task_by_pid(pid);
1847 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 total_memory = nr_free_pagecache_pages();
1849 hotcpu_notifier(cpu_callback, 0);
1850 return 0;
1851}
1852
1853module_init(kswapd_init)
Christoph Lameter9eeff232006-01-18 17:42:31 -08001854
1855#ifdef CONFIG_NUMA
1856/*
1857 * Zone reclaim mode
1858 *
 1859 * If non-zero, call zone_reclaim() when the number of free pages falls below
1860 * the watermarks.
1861 *
1862 * In the future we may add flags to the mode. However, the page allocator
1863 * should only have to check that zone_reclaim_mode != 0 before calling
1864 * zone_reclaim().
1865 */
1866int zone_reclaim_mode __read_mostly;
1867
Christoph Lameter1b2ffb72006-02-01 03:05:34 -08001868#define RECLAIM_OFF 0
1869#define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */
1870#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
1871#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08001872#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */
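
/*
 * Illustrative sketch (editor's addition, not part of vmscan.c): the bits
 * above compose into zone_reclaim_mode.  For example a mode of
 * RECLAIM_ZONE | RECLAIM_WRITE (== 3) lets zone reclaim run shrink_zone()
 * and write dirty pages back, but still forbids swapping pages out.  The
 * hypothetical helpers below mirror the !!(zone_reclaim_mode & RECLAIM_WRITE)
 * pattern used in __zone_reclaim() further down.
 */
static inline int sketch_reclaim_may_write(int mode)
{
	return !!(mode & RECLAIM_WRITE);
}

static inline int sketch_reclaim_may_swap(int mode)
{
	return !!(mode & RECLAIM_SWAP);
}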
Christoph Lameter1b2ffb72006-02-01 03:05:34 -08001873
Christoph Lameter9eeff232006-01-18 17:42:31 -08001874/*
 1875 * Minimum time between zone reclaim scans
1876 */
Christoph Lameter2a11ff02006-02-01 03:05:33 -08001877int zone_reclaim_interval __read_mostly = 30*HZ;
Christoph Lametera92f7122006-02-01 03:05:32 -08001878
1879/*
1880 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 1881 * of a node considered in each zone_reclaim() pass: priority 4 scans
 1882 * 1/16th of a zone.
1883 */
1884#define ZONE_RECLAIM_PRIORITY 4
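
/*
 * Illustrative sketch (editor's addition, not part of vmscan.c): at a given
 * priority roughly zone_size >> priority pages are considered per pass, so
 * ZONE_RECLAIM_PRIORITY == 4 starts by looking at about 1/16th of the zone's
 * LRU pages (e.g. 1048576 pages >> 4 == 65536).  This helper just restates
 * that arithmetic; it is not kernel code.
 */
static inline unsigned long sketch_pages_considered(unsigned long zone_lru_pages,
						    int priority)
{
	return zone_lru_pages >> priority;	/* 1/2^priority of the zone */
}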
1885
Christoph Lameter9eeff232006-01-18 17:42:31 -08001886/*
1887 * Try to free up some pages from this zone through reclaim.
1888 */
Andrew Morton179e9632006-03-22 00:08:18 -08001889static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
Christoph Lameter9eeff232006-01-18 17:42:31 -08001890{
Christoph Lameter7fb2d462006-03-22 00:08:22 -08001891 /* Minimum pages needed in order to stay on node */
Andrew Morton69e05942006-03-22 00:08:19 -08001892 const unsigned long nr_pages = 1 << order;
Christoph Lameter9eeff232006-01-18 17:42:31 -08001893 struct task_struct *p = current;
1894 struct reclaim_state reclaim_state;
Christoph Lameter86959492006-03-22 00:08:18 -08001895 int priority;
Andrew Morton05ff5132006-03-22 00:08:20 -08001896 unsigned long nr_reclaimed = 0;
Andrew Morton179e9632006-03-22 00:08:18 -08001897 struct scan_control sc = {
1898 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1899 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
1900 .nr_mapped = read_page_state(nr_mapped),
Andrew Morton69e05942006-03-22 00:08:19 -08001901 .swap_cluster_max = max_t(unsigned long, nr_pages,
1902 SWAP_CLUSTER_MAX),
Andrew Morton179e9632006-03-22 00:08:18 -08001903 .gfp_mask = gfp_mask,
1904 };
Christoph Lameter9eeff232006-01-18 17:42:31 -08001905
1906 disable_swap_token();
Christoph Lameter9eeff232006-01-18 17:42:31 -08001907 cond_resched();
Christoph Lameterd4f77962006-02-24 13:04:22 -08001908 /*
1909 * We need to be able to allocate from the reserves for RECLAIM_SWAP
1910 * and we also need to be able to write out pages for RECLAIM_WRITE
1911 * and RECLAIM_SWAP.
1912 */
1913 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
Christoph Lameter9eeff232006-01-18 17:42:31 -08001914 reclaim_state.reclaimed_slab = 0;
1915 p->reclaim_state = &reclaim_state;
Christoph Lameterc84db232006-02-01 03:05:29 -08001916
Christoph Lametera92f7122006-02-01 03:05:32 -08001917 /*
1918 * Free memory by calling shrink zone with increasing priorities
1919 * until we have enough memory freed.
1920 */
Christoph Lameter86959492006-03-22 00:08:18 -08001921 priority = ZONE_RECLAIM_PRIORITY;
Christoph Lametera92f7122006-02-01 03:05:32 -08001922 do {
Andrew Morton05ff5132006-03-22 00:08:20 -08001923 nr_reclaimed += shrink_zone(priority, zone, &sc);
Christoph Lameter86959492006-03-22 00:08:18 -08001924 priority--;
Andrew Morton05ff5132006-03-22 00:08:20 -08001925 } while (priority >= 0 && nr_reclaimed < nr_pages);
Christoph Lameterc84db232006-02-01 03:05:29 -08001926
Andrew Morton05ff5132006-03-22 00:08:20 -08001927 if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08001928 /*
Christoph Lameter7fb2d462006-03-22 00:08:22 -08001929 * shrink_slab() does not currently allow us to determine how
1930 * many pages were freed in this zone. So we just shake the slab
1931 * a bit and then go off node for this particular allocation
1932 * despite possibly having freed enough memory to allocate in
1933 * this zone. If we freed local memory then the next
1934 * allocations will be local again.
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08001935 *
1936 * shrink_slab will free memory on all zones and may take
1937 * a long time.
1938 */
1939 shrink_slab(sc.nr_scanned, gfp_mask, order);
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08001940 }
1941
Christoph Lameter9eeff232006-01-18 17:42:31 -08001942 p->reclaim_state = NULL;
Christoph Lameterd4f77962006-02-24 13:04:22 -08001943 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
Christoph Lameter9eeff232006-01-18 17:42:31 -08001944
Christoph Lameter7fb2d462006-03-22 00:08:22 -08001945 if (nr_reclaimed == 0) {
1946 /*
1947 * We were unable to reclaim enough pages to stay on node. We
1948 * now allow off node accesses for a certain time period before
1949 * trying again to reclaim pages from the local zone.
1950 */
Christoph Lameter9eeff232006-01-18 17:42:31 -08001951 zone->last_unsuccessful_zone_reclaim = jiffies;
Christoph Lameter7fb2d462006-03-22 00:08:22 -08001952 }
Christoph Lameter9eeff232006-01-18 17:42:31 -08001953
Andrew Morton05ff5132006-03-22 00:08:20 -08001954 return nr_reclaimed >= nr_pages;
Christoph Lameter9eeff232006-01-18 17:42:31 -08001955}
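
/*
 * Illustrative sketch (editor's addition, not part of vmscan.c): the
 * escalating-pressure pattern used above.  Start at the mild
 * ZONE_RECLAIM_PRIORITY and, only if that did not free enough, lower the
 * priority (roughly doubling the pages looked at each step) until the goal
 * is met or priority 0 has been tried.  The reclaim_at callback is a
 * hypothetical stand-in for shrink_zone().
 */
static unsigned long sketch_escalating_reclaim(unsigned long goal,
			unsigned long (*reclaim_at)(int priority))
{
	unsigned long reclaimed = 0;
	int priority = ZONE_RECLAIM_PRIORITY;

	do {
		reclaimed += reclaim_at(priority);
		priority--;
	} while (priority >= 0 && reclaimed < goal);

	return reclaimed;
}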
Andrew Morton179e9632006-03-22 00:08:18 -08001956
1957int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1958{
1959 cpumask_t mask;
1960 int node_id;
1961
1962 /*
1963 * Do not reclaim if there was a recent unsuccessful attempt at zone
1964 * reclaim. In that case we let allocations go off node for the
1965 * zone_reclaim_interval. Otherwise we would scan for each off-node
1966 * page allocation.
1967 */
1968 if (time_before(jiffies,
1969 zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
1970 return 0;
1971
1972 /*
 1973 * Avoid concurrent zone reclaims; do not reclaim in a zone that has
 1974 * no reclaimable pages; and if the allocation must not be delayed,
 1975 * do not scan at all.
1976 */
1977 if (!(gfp_mask & __GFP_WAIT) ||
1978 zone->all_unreclaimable ||
1979 atomic_read(&zone->reclaim_in_progress) > 0 ||
1980 (current->flags & PF_MEMALLOC))
1981 return 0;
1982
1983 /*
1984 * Only run zone reclaim on the local zone or on zones that do not
1985 * have associated processors. This will favor the local processor
1986 * over remote processors and spread off node memory allocations
 1987 * as widely as possible.
1988 */
1989 node_id = zone->zone_pgdat->node_id;
1990 mask = node_to_cpumask(node_id);
1991 if (!cpus_empty(mask) && node_id != numa_node_id())
1992 return 0;
1993 return __zone_reclaim(zone, gfp_mask, order);
1994}
Christoph Lameter9eeff232006-01-18 17:42:31 -08001995#endif
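
/*
 * Illustrative sketch (editor's addition, not part of vmscan.c): the back-off
 * applied by zone_reclaim().  After an unsuccessful attempt the failure time
 * is recorded, and for the next zone_reclaim_interval jiffies the allocator
 * goes straight off node instead of rescanning on every allocation.  The
 * names are hypothetical; the wraparound-safe comparison mirrors
 * time_before() as used above.
 */
static int sketch_should_skip_zone_reclaim(unsigned long now_jiffies,
					   unsigned long last_failure,
					   unsigned long interval)
{
	/* equivalent to time_before(now_jiffies, last_failure + interval) */
	return (long)(now_jiffies - (last_failure + interval)) < 0;
}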