/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

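/*
 * Illustrative sketch, not part of this file: a buffer_head-backed
 * filesystem usually leaves ->invalidatepage NULL and relies on the
 * block_invalidatepage() fallback above, or wires the hook up
 * explicitly in its address_space_operations (the foo_* names are
 * hypothetical):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */
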
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	cleancache_invalidate_page(page->mapping, page);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

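/*
 * Illustrative sketch, not part of this file: filesystems opt in to
 * hwpoison recovery by pointing ->error_remove_page at the helper
 * above; mm/memory-failure.c then calls it to drop the poisoned page:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.error_remove_page = generic_error_remove_page,
 *	};
 */
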
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t index;
	pgoff_t end;
	int i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

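/*
 * Illustrative note, not part of this file: because of the BUG_ON
 * above, @lend must point at the last byte of a page.  Callers
 * therefore pass values such as:
 *
 *	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
 *	truncate_inode_pages_range(mapping, lstart,
 *			((loff_t)(end_index + 1) << PAGE_CACHE_SHIFT) - 1);
 *
 * where end_index is the last page to remove (a hypothetical name).
 */
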
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

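/*
 * Illustrative sketch, not part of this file: the usual caller is a
 * filesystem's ->evict_inode(), which must empty the pagecache before
 * the inode is torn down (foo_evict_inode is a hypothetical name):
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		... then release fs-private state and clear the inode ...
 *	}
 */
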
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

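/*
 * Illustrative sketch, not part of this file: this helper backs
 * posix_fadvise(POSIX_FADV_DONTNEED); mm/fadvise.c does roughly:
 *
 *	start_index = (offset + (PAGE_CACHE_SIZE - 1)) >> PAGE_CACHE_SHIFT;
 *	end_index = endbyte >> PAGE_CACHE_SHIFT;
 *	if (end_index >= start_index)
 *		invalidate_mapping_pages(mapping, start_index, end_index);
 */
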
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	clear_page_mlock(page);

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

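/*
 * Illustrative sketch, not part of this file: O_DIRECT writes use the
 * range variant to push the pagecache out of the way; mm/filemap.c
 * does roughly the following before issuing the direct I/O:
 *
 *	if (mapping->nrpages) {
 *		written = invalidate_inode_pages2_range(mapping,
 *					pos >> PAGE_CACHE_SHIFT, end);
 *		if (written)
 *			... a page was busy: bail out or fall back ...
 *	}
 */
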
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @oldsize: old file size
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t oldsize, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode->i_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

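/*
 * Illustrative sketch, not part of this file: a typical ->setattr()
 * uses the helper above (foo_setattr is a hypothetical name):
 *
 *	static int foo_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, attr);
 *
 *		if (error)
 *			return error;
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size != i_size_read(inode))
 *			truncate_setsize(inode, attr->ia_size);
 *		setattr_copy(inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */
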
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of the inode size caused either by an extending truncate
 * or by a write starting after the current i_size. We mark the page
 * straddling the current i_size read-only so that page_mkwrite() is called
 * on the first write access to the page. This way the filesystem can be sure
 * that page_mkwrite() is called on the page before a user writes to the page
 * via mmap after the i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also ensures that userspace cannot observe
 * the new i_size value before we are prepared to store mmap writes at the
 * new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(!mutex_is_locked(&inode->i_mutex));
	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_CACHE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
		return;

	index = from >> PAGE_CACHE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @newsize: file offset to start truncating
 *
 * This function is deprecated and truncate_setsize or truncate_pagecache
 * should be used instead, together with filesystem specific block truncation.
 */
int vmtruncate(struct inode *inode, loff_t newsize)
{
	int error;

	error = inode_newsize_ok(inode, newsize);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
}
EXPORT_SYMBOL(vmtruncate);

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first; and truncate_inode_pages_range
	 * currently BUGs if lend is not pagealigned-1 (it handles partial
	 * page at start of hole, but not partial page at end of hole).  Note
	 * unmap_mapping_range allows holelen 0 for all, and we allow lend -1.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
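
/*
 * Illustrative sketch, not part of this file: a hole-punch handler
 * (e.g. for fallocate(FALLOC_FL_PUNCH_HOLE)) typically clears the
 * pagecache first and only then frees the on-disk blocks:
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	... then deallocate the blocks backing [offset, offset + len) ...
 */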