/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
 * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
 * taken together; in truncation, i_mutex is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock or pte_lock
 *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
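
/*
 * Illustrative note (editorial addition, not part of the original file):
 * in this kernel generation the anonymous fault paths in mm/memory.c are
 * the typical callers -- roughly, under down_read(&mm->mmap_sem), something
 * like:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		goto oom;
 *	page = ...allocate and map a new page...;
 *	page_add_new_anon_rmap(page, vma, address);
 *
 * i.e. the vma is given an anon_vma before its first anonymous pte is
 * installed, so page_add_*_anon_rmap() can rely on vma->anon_vma being set.
 */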

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
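
/*
 * Worked example (editorial addition, illustrative only, assuming 4K pages
 * so PAGE_CACHE_SHIFT == PAGE_SHIFT == 12): a vma with vm_start == 0x08000000
 * and vm_pgoff == 0x10, and a page with page->index == 0x13, gives
 *
 *	address = 0x08000000 + ((0x13 - 0x10) << 12) = 0x08003000
 *
 * which lies inside the vma, so 0x08003000 is returned; had the vma been
 * shorter than 0x3000 bytes, the range check would have returned -EFAULT.
 */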

/*
 * At what user virtual address is page expected in vma? Checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
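
/*
 * Illustrative caller pattern (editorial addition, not from the original
 * source): a caller that gets a non-NULL pte back owns the mapping and the
 * pte lock, and must drop both with pte_unmap_unlock(), roughly:
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (pte) {
 *		... examine or modify *pte ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 *
 * page_referenced_one() and try_to_unmap_one() below follow exactly this
 * pattern.
 */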

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
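
/*
 * Illustrative note (editorial addition, not part of the original file):
 * the page reclaim code in mm/vmscan.c is the main consumer of this count.
 * Broadly, a page that page_referenced() reports as recently referenced is
 * kept on (or moved back to) the active list, while an unreferenced page
 * becomes a candidate for try_to_unmap() and pageout. See
 * refill_inactive_zone() and shrink_list() in this kernel generation for
 * the exact policy.
 */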

/**
 * page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_page_state(nr_mapped);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}

/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	BUG_ON(PageAnon(page));
	BUG_ON(!pfn_valid(page_to_pfn(page)));

	if (atomic_inc_and_test(&page->_mapcount))
		__inc_page_state(nr_mapped);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (page_mapcount(page) < 0) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
		}

		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		__dec_page_state(nr_mapped);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int ignore_refs)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)
				&& !ignore_refs)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		if (list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			if (list_empty(&mm->mmlist))
				list_add(&mm->mmlist, &init_mm.mmlist);
			spin_unlock(&mmlist_lock);
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
		dec_mm_counter(mm, anon_rss);
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
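
/*
 * Worked example (editorial addition, illustrative only, assuming 4K pages
 * and a PMD_SIZE of at least 128K): CLUSTER_SIZE is 32 * 4096 = 0x20000 and
 * CLUSTER_MASK is ~0x1ffff, so a cursor for which vma->vm_start + cursor
 * equals 0x40031000 starts the scan at 0x40020000 and covers the ptes up to
 * 0x4003ffff -- one fixed-size window per call, with the cursor advanced by
 * try_to_unmap_file() below.
 */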

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int ignore_refs)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, ignore_refs);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int ignore_refs)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, ignore_refs);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & VM_LOCKED)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway. Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & VM_LOCKED)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas). Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int ignore_refs)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, ignore_refs);
	else
		ret = try_to_unmap_file(page, ignore_refs);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
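
/*
 * Illustrative note (editorial addition, not part of the original file):
 * the pageout path in mm/vmscan.c is the intended caller -- broadly,
 * shrink_list() calls try_to_unmap(page, 0) on a locked, mapped page and
 * then acts on the result: SWAP_SUCCESS lets it go on to write out or free
 * the page, SWAP_AGAIN keeps the page on the list to be retried later, and
 * SWAP_FAIL sends the page back to the active list. The exact switch lives
 * in vmscan.c, not here.
 */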