/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

struct scan_control {
	/* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
	unsigned long nr_to_scan;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Incremented by the number of pages reclaimed */
	unsigned long nr_reclaimed;

	unsigned long nr_mapped;	/* From page_state */

	/* Ask shrink_caches, or shrink_zone to scan at this priority */
	unsigned int priority;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);
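
/*
 * Usage sketch (illustrative only, not part of this file): a cache that
 * wants to shed entries under memory pressure registers a shrinker_t
 * callback with set_shrinker() and drops it with remove_shrinker().  The
 * names my_cache_shrink(), my_cache_count() and my_cache_free_some() are
 * hypothetical, but the calling convention matches how shrink_slab()
 * below uses shrinkers: nr_to_scan == 0 only asks for the number of
 * freeable objects, a non-zero nr_to_scan asks the cache to free up to
 * that many and report the remaining count, and -1 means the cache
 * cannot shrink with this gfp_mask.
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan) {
 *			if (!(gfp_mask & __GFP_FS))
 *				return -1;
 *			my_cache_free_some(nr_to_scan);
 *		}
 *		return my_cache_count();
 *	}
 *
 *	struct shrinker *s = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
 *	...
 *	remove_shrinker(s);
 */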

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages)
{
	struct shrinker *shrinker;
	int ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}
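
/*
 * Rough worked example of the balancing above (illustrative numbers only):
 * with scanned = 1024 LRU pages, shrinker->seeks = 2, max_pass = 10000
 * freeable objects and lru_pages = 100000,
 *
 *	delta = (4 * 1024 / 2) * 10000 / (100000 + 1) ~= 204
 *
 * so scanning roughly 1% of the LRU adds roughly 2% of the cache's objects
 * to shrinker->nr (the 4 / seeks factor weights slab twice here), and the
 * cache is only asked to age them once shrinker->nr accumulates past
 * SHRINK_BATCH, in SHRINK_BATCH sized chunks.
 */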

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

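/*
 * Reference-count sketch (an explanatory note, inferred from how callers
 * use this test): a freeable pagecache page should be referenced only by
 * the caller that isolated it, by the page cache itself and, when
 * PagePrivate is set, by the attached buffer heads, so page_count() minus
 * the buffer reference must come to exactly 2.
 */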
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/*
 * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}

		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

static int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return 0;		/* truncate got there first */

	write_lock_irq(&mapping->tree_lock);

	/*
	 * The non-racy check for busy page.  It is critical to check
	 * PageDirty _after_ making sure that the page is freeable and
	 * not in use by anybody.	(pagecache + us == 2)
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
 */
static int shrink_list(struct list_head *page_list, struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!sc->may_swap)
				goto keep_locked;
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
		}
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			/*
			 * No unmapping if we do not swap
			 */
			if (!sc->may_swap)
				goto keep_locked;

			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch(pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	sc->nr_reclaimed += reclaimed;
	return reclaimed;
}

#ifdef CONFIG_MIGRATION
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Non migratable page
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch(pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}
EXPORT_SYMBOL(swap_page);

/*
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return 1;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	try_to_unmap(page, 1);

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return 1;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return 1;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	if (migrate_page_remove_references(newpage, page, 2))
		return -EAGAIN;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the "to" list has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers..... As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}
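
/*
 * Caller-side sketch (illustrative only; callers such as mm/mempolicy.c in
 * this kernel follow this pattern): pages are first isolated onto a private
 * list, migrate_pages() is invoked with an optional list of target pages
 * (NULL here, so the pages are simply swapped out), and whatever ends up on
 * the moved and failed lists is handed back to the LRU afterwards.
 *
 *	LIST_HEAD(pagelist);
 *	LIST_HEAD(moved);
 *	LIST_HEAD(failed);
 *
 *	if (isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	migrate_pages(&pagelist, NULL, &moved, &failed);
 *	putback_lru_pages(&moved);
 *	putback_lru_pages(&failed);
 */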

/*
 * Isolate one page from the LRU lists, with an elevated refcount, so that
 * the caller can put it on a private list.
 *
 * Result:
 *  0 = page not on LRU list
 *  1 = page removed from LRU list.
 */
int isolate_lru_page(struct page *page)
{
	int ret = 0;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (TestClearPageLRU(page)) {
			ret = 1;
			get_page(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	return ret;
}
#endif

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
			     struct list_head *dst, int *scanned)
{
	int nr_taken = 0;
	struct page *page;
	int scan = 0;

	while (scan++ < nr_to_scan && !list_empty(src)) {
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (get_page_testone(page)) {
			/*
			 * It is being freed elsewhere
			 */
			__put_page(page);
			SetPageLRU(page);
			list_add(&page->lru, src);
			continue;
		} else {
			list_add(&page->lru, dst);
			nr_taken++;
		}
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
 */
static void shrink_cache(struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int max_scan = sc->nr_to_scan;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0) {
		struct page *page;
		int nr_taken;
		int nr_scan;
		int nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		nr_freed = shrink_list(&page_list, sc);

		local_irq_disable();
		if (current_is_kswapd()) {
			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
			__mod_page_state(kswapd_steal, nr_freed);
		} else
			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
		__mod_page_state_zone(zone, pgsteal, nr_freed);

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, struct scan_control *sc)
{
	int pgmoved;
	int pgdeactivate = 0;
	int pgscanned;
	int nr_pages = sc->nr_to_scan;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;
	long mapped_ratio;
	long distress;
	long swap_tendency;

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	/*
	 * `distress' is a measure of how much trouble we're having reclaiming
	 * pages.  0 -> no problems.  100 -> great trouble.
	 */
	distress = 100 >> zone->prev_priority;

	/*
	 * The point of this algorithm is to decide when to start reclaiming
	 * mapped memory instead of just pagecache.  Work out how much memory
	 * is mapped.
	 */
	mapped_ratio = (sc->nr_mapped * 100) / total_memory;

	/*
	 * Now decide how much we really want to unmap some pages.  The mapped
	 * ratio is downgraded - just because there's a lot of mapped memory
	 * doesn't necessarily mean that page reclaim isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

	/*
	 * Now use this metric to decide whether to start moving mapped memory
	 * onto the inactive list.
	 */
	if (swap_tendency >= 100 && sc->may_swap)
		reclaim_mapped = 1;

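	/*
	 * Worked example with illustrative numbers: with vm_swappiness at
	 * its default of 60, half of memory mapped (mapped_ratio = 50) and
	 * little reclaim trouble (prev_priority = 6, so distress is
	 * 100 >> 6 = 1), swap_tendency = 25 + 1 + 60 = 86, which stays
	 * below 100 and mapped pages are left alone.  Once reclaim is
	 * struggling (prev_priority = 0, distress = 100) the same system
	 * gets 25 + 100 + 60 = 185 and mapped pages become candidates too.
	 */
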
	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock(&zone->lru_lock);

	__mod_page_state_zone(zone, pgrefill, pgscanned);
	__mod_page_state(pgdeactivate, pgdeactivate);
	local_irq_enable();

	pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void
shrink_zone(struct zone *zone, struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

	while (nr_active || nr_inactive) {
		if (nr_active) {
			sc->nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= sc->nr_to_scan;
			refill_inactive_zone(zone, sc);
		}

		if (nr_inactive) {
			sc->nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= sc->nr_to_scan;
			shrink_cache(zone, sc);
		}
	}

	throttle_vm_writeout();

	atomic_dec(&zone->reclaim_in_progress);
}
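
/*
 * Rough feel for the batching above (illustrative numbers): with
 * zone->nr_active = 100000 and priority at DEF_PRIORITY (12), each call
 * adds (100000 >> 12) + 1 = 25 to zone->nr_scan_active; the active list
 * is only scanned once the accumulated value reaches swap_cluster_max
 * (normally SWAP_CLUSTER_MAX), and then in swap_cluster_max sized chunks.
 * As the priority drops towards 0 the per-call increment approaches the
 * whole list, so scanning intensity rises smoothly with reclaim pressure.
 */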

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void
shrink_caches(struct zone **zones, struct scan_control *sc)
{
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!populated_zone(zone))
			continue;

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = sc->priority;
		if (zone->prev_priority > sc->priority)
			zone->prev_priority = sc->priority;

		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		shrink_zone(zone, sc);
	}
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
	int priority;
	int ret = 0;
	int total_scanned = 0, total_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;
	unsigned long lru_pages = 0;
	int i;

	sc.gfp_mask = gfp_mask;
	sc.may_writepage = !laptop_mode;
	sc.may_swap = 1;

	inc_page_state(allocstall);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = DEF_PRIORITY;
		lru_pages += zone->nr_active + zone->nr_inactive;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_mapped = read_page_state(nr_mapped);
		sc.nr_scanned = 0;
		sc.nr_reclaimed = 0;
		sc.priority = priority;
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
		if (!priority)
			disable_swap_token();
		shrink_caches(zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		total_scanned += sc.nr_scanned;
		total_reclaimed += sc.nr_reclaimed;
		if (total_reclaimed >= sc.swap_cluster_max) {
			ret = 1;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.  But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc.may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
out:
	for (i = 0; zones[i] != 0; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->prev_priority = zone->temp_priority;
	}
	return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
{
	int to_free = nr_pages;
	int all_zones_ok;
	int priority;
	int i;
	int total_scanned, total_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;

loop_again:
	total_scanned = 0;
	total_reclaimed = 0;
	sc.gfp_mask = GFP_KERNEL;
	sc.may_writepage = !laptop_mode;
	sc.may_swap = 1;
	sc.nr_mapped = read_page_state(nr_mapped);

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		if (nr_pages == 0) {
			/*
			 * Scan in the highmem->dma direction for the highest
			 * zone which needs scanning
			 */
			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
				struct zone *zone = pgdat->node_zones + i;

				if (!populated_zone(zone))
					continue;

				if (zone->all_unreclaimable &&
						priority != DEF_PRIORITY)
					continue;

				if (!zone_watermark_ok(zone, order,
						zone->pages_high, 0, 0)) {
					end_zone = i;
					goto scan;
				}
			}
			goto out;
		} else {
			end_zone = pgdat->nr_zones - 1;
		}
scan:
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone->nr_active + zone->nr_inactive;
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages == 0) {	/* Not software suspend */
				if (!zone_watermark_ok(zone, order,
						zone->pages_high, end_zone, 0))
					all_zones_ok = 0;
			}
			zone->temp_priority = priority;
			if (zone->prev_priority > priority)
				zone->prev_priority = priority;
			sc.nr_scanned = 0;
			sc.nr_reclaimed = 0;
			sc.priority = priority;
			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
			atomic_inc(&zone->reclaim_in_progress);
			shrink_zone(zone, &sc);
			atomic_dec(&zone->reclaim_in_progress);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_reclaimed += sc.nr_reclaimed;
			total_scanned += sc.nr_scanned;
			if (zone->all_unreclaimable)
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
				    (zone->nr_active + zone->nr_inactive) * 4)
				zone->all_unreclaimable = 1;
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > total_reclaimed+total_reclaimed/2)
				sc.may_writepage = 1;
		}
		if (nr_pages && to_free > total_reclaimed)
			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
			break;
	}
out:
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	if (!all_zones_ok) {
		cond_resched();
		goto loop_again;
	}

	return total_reclaimed;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up.  This is needed for things like routing
 * etc., where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()").  "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		try_to_freeze();

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			schedule();
			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		balance_pgdat(pgdat, 0, order);
	}
	return 0;
}
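
/*
 * Editor's note (not in the original source): prepare_to_wait() parks
 * kswapd on pgdat->kswapd_wait before kswapd_max_order is sampled, so a
 * wakeup_kswapd() that raises kswapd_max_order between the sample and
 * schedule() is not lost: the wakeup sets the task runnable again and
 * schedule() returns immediately.
 */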

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}
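
/*
 * Editor's note (assumption about the caller, not shown in this file):
 * the page allocator is expected to call wakeup_kswapd() when a zone
 * drops below its low watermark, before resorting to direct reclaim,
 * so kswapd can restore the zone to pages_high in the background.
 */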

#ifdef CONFIG_PM
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of
 * freed pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;

		freed = balance_pgdat(pgdat, nr_to_free, 0);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}
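
/*
 * Editor's note (assumption): the software suspend code is the expected
 * caller here, freeing a batch of pages before a suspend image is
 * written, which is why this helper is only built under CONFIG_PM.
 */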
#endif /* CONFIG_PM */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * It's optimal to keep kswapds on the same CPUs as their memory, but
 * not required for correctness.  So if the last cpu in a node goes
 * away, we get changed to run anywhere: as the first one comes back,
 * restore their cpu bindings.
 */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;

	if (action == CPU_ONLINE) {
		for_each_pgdat(pgdat) {
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __init kswapd_init(void)
{
	pg_data_t *pgdat;

	swap_setup();
	for_each_pgdat(pgdat)
		pgdat->kswapd = find_task_by_pid(
				kernel_thread(kswapd, pgdat, CLONE_KERNEL));
	total_memory = nr_free_pagecache_pages();
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 *
 * In the future we may add flags to the mode. However, the page allocator
 * should only have to check that zone_reclaim_mode != 0 before calling
 * zone_reclaim().
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_cache on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
#define RECLAIM_SLAB (1<<3)	/* Do a global slab shrink if the zone is out of memory */
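
/*
 * Editor's note (illustrative, assuming the mode is exposed as the
 * vm.zone_reclaim_mode sysctl): the flags combine as a bitmask, e.g.
 *
 *	echo 1 > /proc/sys/vm/zone_reclaim_mode	   RECLAIM_ZONE only
 *	echo 7 > /proc/sys/vm/zone_reclaim_mode	   ZONE | WRITE | SWAP
 */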

/*
 * Minimum time between zone reclaim scans
 */
int zone_reclaim_interval __read_mostly = 30*HZ;

/*
 * Priority for ZONE_RECLAIM.  This determines the fraction of a zone's
 * pages scanned per zone_reclaim attempt: priority 4 scans roughly
 * 1/16th of the zone.
 */
#define ZONE_RECLAIM_PRIORITY 4
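
/*
 * Editor's note (worked example, assuming shrink_zone() scales its scan
 * batch as zone_size >> priority, as elsewhere in this file): starting
 * from ZONE_RECLAIM_PRIORITY, the passes in zone_reclaim() run at
 * priority 4, 3, 2, 1 and so cover about 1/16, 1/8, 1/4 and 1/2 of the
 * zone, stopping as soon as enough pages have been reclaimed.
 */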

/*
 * Try to free up some pages from this zone through reclaim.
 */
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int nr_pages;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	struct scan_control sc;
	cpumask_t mask;
	int node_id;

	if (time_before(jiffies,
		zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
		return 0;

	if (!(gfp_mask & __GFP_WAIT) ||
		zone->all_unreclaimable ||
		atomic_read(&zone->reclaim_in_progress) > 0)
		return 0;

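	/*
	 * Editor's note (not in the original source): the check below
	 * confines zone_reclaim to CPUs local to the zone's node; remote
	 * CPUs bail out unless the node has no CPUs at all (a memory-only
	 * node), in which case any CPU may reclaim from it.
	 */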
	node_id = zone->zone_pgdat->node_id;
	mask = node_to_cpumask(node_id);
	if (!cpus_empty(mask) && node_id != numa_node_id())
		return 0;

	sc.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE);
	sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP);
	sc.nr_scanned = 0;
	sc.nr_reclaimed = 0;
	sc.priority = ZONE_RECLAIM_PRIORITY + 1;
	sc.nr_mapped = read_page_state(nr_mapped);
	sc.gfp_mask = gfp_mask;

	disable_swap_token();

	nr_pages = 1 << order;
	if (nr_pages > SWAP_CLUSTER_MAX)
		sc.swap_cluster_max = nr_pages;
	else
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;

	cond_resched();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	/*
	 * Free memory by calling shrink_zone with increasing pressure
	 * (numerically decreasing priority) until we have reclaimed
	 * enough pages or run out of priorities.
	 */
	do {
		sc.priority--;
		shrink_zone(zone, &sc);
	} while (sc.nr_reclaimed < nr_pages && sc.priority > 0);

	if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
		/*
		 * shrink_slab() does not currently allow us to determine
		 * how many pages were freed in the zone.  So we just
		 * shake the slab and then go offnode for a single
		 * allocation.
		 *
		 * shrink_slab() will free memory on all zones and may
		 * take a long time.
		 */
		shrink_slab(sc.nr_scanned, gfp_mask, order);
		sc.nr_reclaimed = 1; /* Avoid getting the off node timeout */
	}
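
	/*
	 * Editor's note (follows from the code above): reporting
	 * nr_reclaimed as 1 after a RECLAIM_SLAB pass keeps the
	 * last_unsuccessful_zone_reclaim stamp below from being set,
	 * so the next attempt on this zone is not throttled by
	 * zone_reclaim_interval.
	 */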

	p->reclaim_state = NULL;
	current->flags &= ~PF_MEMALLOC;

	if (sc.nr_reclaimed == 0)
		zone->last_unsuccessful_zone_reclaim = jiffies;

	return sc.nr_reclaimed >= nr_pages;
}
#endif /* CONFIG_NUMA */
