/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
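
/*
 * Illustrative sketch (not part of the original file): a block-backed
 * filesystem hooks into the dispatch above through its
 * address_space_operations; if it sets no ->invalidatepage at all,
 * CONFIG_BLOCK kernels fall back to block_invalidatepage().  "foofs"
 * is a hypothetical name.
 *
 *	static void foofs_invalidatepage(struct page *page,
 *					 unsigned long offset)
 *	{
 *		...per-fs metadata cleanup...
 *		block_invalidatepage(page, offset);
 *	}
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.invalidatepage	= foofs_invalidatepage,
 *	};
 */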

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself; it does
 * NOT actually remove dirty bits on any mmap's that may be around.  It
 * also leaves the page tagged dirty, so any sync activity will still
 * find it on the dirty lists, and in particular, clear_page_dirty_for_io()
 * will still look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated,
 * and is not actually mapped anywhere at all.  However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM.  Can you say "ext3 is
 * horribly ugly"?  Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

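/*
 * Illustrative sketch (not part of the original file): the fs/buffer.c
 * case described above amounts to a call like
 *
 *	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *
 * once every buffer on the page has been cleaned.  Passing
 * PAGE_CACHE_SIZE credits the task's cancelled-write accounting for the
 * whole page; passing 0 skips that accounting.
 */
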
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

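/*
 * Illustrative sketch (not part of the original file): a filesystem opts
 * in to this hwpoison handling by pointing ->error_remove_page at the
 * helper above in its address_space_operations.  "foofs" is a
 * hypothetical name.
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */
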
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.  The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
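
/*
 * Illustrative sketch (not part of the original file): the BUG_ON above
 * requires @lend to point at the last byte of a page.  A caller dropping
 * the cache for a page-aligned region would compute it like
 *
 *	truncate_inode_pages_range(mapping,
 *			(loff_t)first << PAGE_CACHE_SHIFT,
 *			((loff_t)(last + 1) << PAGE_CACHE_SHIFT) - 1);
 *
 * where "first" and "last" are hypothetical indices of the first and
 * last pages in the range.
 */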

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

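/*
 * Illustrative sketch (not part of the original file): the classic caller
 * is a filesystem's inode-teardown path, which empties the pagecache
 * before the inode is freed, e.g.
 *
 *	truncate_inode_pages(&inode->i_data, 0);
 *
 * from a (hypothetical) foofs delete_inode/clear_inode routine.
 */
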
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

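/*
 * Illustrative sketch (not part of the original file): dropping as much
 * clean, unmapped pagecache from one inode as possible, without touching
 * dirty data, looks like
 *
 *	unsigned long nr = invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *
 * The return value counts pages actually freed; dirty, locked, mapped or
 * writeback pages are silently skipped.
 */
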
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

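/*
 * Illustrative sketch (not part of the original file): a typical user is
 * a direct-IO write path, shooting down cached pages over the byte range
 * just written so that later buffered reads see fresh data:
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT,
 *			(pos + count - 1) >> PAGE_CACHE_SHIFT);
 *
 * "pos" and "count" are hypothetical byte offset/length variables; -EBUSY
 * here means some page could not be invalidated.
 */
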
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file offset
 * @new: new file offset
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	if (new < old) {
		struct address_space *mapping = inode->i_mapping;

		/*
		 * unmap_mapping_range is called twice, first simply for
		 * efficiency so that truncate_inode_pages does fewer
		 * single-page unmaps.  However after this first call, and
		 * before truncate_inode_pages finishes, it is possible for
		 * private pages to be COWed, which remain after
		 * truncate_inode_pages finishes, hence the second
		 * unmap_mapping_range call must be made for correctness.
		 */
		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
		truncate_inode_pages(mapping, new);
		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	}
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	loff_t oldsize;
	int error;

	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;
	oldsize = inode->i_size;
	i_size_write(inode, offset);
	truncate_pagecache(inode, oldsize, offset);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	return error;
}
EXPORT_SYMBOL(vmtruncate);
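
/*
 * Illustrative sketch (not part of the original file): vmtruncate() is
 * what a generic ->setattr() path reaches when userspace truncates a
 * file; a minimal caller looks like
 *
 *	if (attr->ia_valid & ATTR_SIZE) {
 *		error = vmtruncate(inode, attr->ia_size);
 *		if (error)
 *			return error;
 *	}
 */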