/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
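
/*
 * Illustrative sketch (not part of this file): a filesystem that keeps
 * private per-page state would normally supply its own handler in its
 * address_space_operations, so do_invalidatepage() calls it instead of
 * falling back to block_invalidatepage().  The "examplefs" names below
 * are made up for the example.
 */
#if 0
static void examplefs_invalidatepage(struct page *page, unsigned long offset)
{
	/* Drop examplefs' private bookkeeping for buffers beyond @offset... */
	block_invalidatepage(page, offset);
}

static const struct address_space_operations examplefs_aops = {
	.invalidatepage	= examplefs_invalidatepage,
};
#endif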

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
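
/*
 * Illustrative sketch (not part of this file): the usual caller pattern is a
 * filesystem dropping every cached page of an inode that is being evicted,
 * e.g. from its ->evict_inode() method.  "examplefs" is a made-up name.
 */
#if 0
static void examplefs_evict_inode(struct inode *inode)
{
	/* Throw away all pagecache for this inode before it goes away. */
	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	/* ...release examplefs-specific resources here... */
}
#endif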

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
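
/*
 * Illustrative sketch (not part of this file): a typical caller is an
 * advisory "drop these cached pages" path, such as the POSIX_FADV_DONTNEED
 * handling in fadvise, which converts a byte range into page indexes and
 * lets invalidate_mapping_pages() skip anything dirty, locked or mapped.
 * The helper below is made up for the example.
 */
#if 0
static void example_drop_cached_range(struct address_space *mapping,
				      loff_t offset, loff_t endbyte)
{
	pgoff_t start_index = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end_index = endbyte >> PAGE_CACHE_SHIFT;

	/* Best effort only: dirty or mapped pages simply stay in cache. */
	if (end_index >= start_index)
		invalidate_mapping_pages(mapping, start_index, end_index);
}
#endif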

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
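
/*
 * Illustrative sketch (not part of this file): the classic user of the
 * stronger pages2 variant is direct I/O, which must make sure no stale
 * pagecache survives a write that bypassed the cache.  The helper below is
 * made up for the example; the real logic lives in generic_file_direct_write.
 */
#if 0
static int example_sync_cache_for_dio(struct address_space *mapping,
				       loff_t pos, size_t count)
{
	pgoff_t first = pos >> PAGE_CACHE_SHIFT;
	pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;

	/* Fails with -EBUSY if somebody re-dirtied a page under us. */
	return invalidate_inode_pages2_range(mapping, first, last);
}
#endif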

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file offset
 * @new: new file offset
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new);
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize;

	oldsize = inode->i_size;
	i_size_write(inode, newsize);

	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
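
/*
 * Illustrative sketch (not part of this file): the intended caller is a
 * filesystem's ->setattr() handling of ATTR_SIZE, which shrinks the
 * pagecache via truncate_setsize() and only then releases the underlying
 * blocks.  The "examplefs" names below are made up for the example.
 */
#if 0
static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size) {
		truncate_setsize(inode, attr->ia_size);
		/* examplefs_truncate_blocks() would free on-disk blocks here. */
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
#endif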

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * This function is deprecated and truncate_setsize or truncate_pagecache
 * should be used instead, together with filesystem specific block truncation.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	int error;

	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;

	truncate_setsize(inode, offset);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
}
EXPORT_SYMBOL(vmtruncate);