/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		return;
	if (*slot != entry)
		return;
	__radix_tree_replace(&mapping->page_tree, node, slot, NULL,
			     workingset_update_node);
	mapping->nrexceptional--;
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	spin_lock_irq(&mapping->tree_lock);
	__clear_shadow_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
}
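
/*
 * Note: the "entries" handled above are exceptional (non-page) radix tree
 * entries - shadow entries left behind by workingset eviction, or DAX
 * entries.  The page-lock stabilization argument does not apply to them,
 * which is why the slot contents are re-checked under tree_lock before
 * being cleared.
 */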

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices,
				pgoff_t end)
{
	int i, j;
	bool dax, lock;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (radix_tree_exceptional_entry(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	lock = !dax && indices[j] < end;
	if (lock)
		spin_lock_irq(&mapping->tree_lock);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!radix_tree_exceptional_entry(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (index >= end)
			continue;

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (lock)
		spin_unlock_irq(&mapping->tree_lock);
	pvec->nr = j;
}
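
/*
 * Illustrative example of the compaction done above (not executed code):
 * with pvec->pages = [P0, S1, P2, S3], where S* are exceptional entries,
 * the loop copies the remaining real pages downwards so that the pagevec
 * becomes [P0, P2] with pvec->nr = 2, while each S* entry inside the
 * truncated range is deleted from the radix tree.
 */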

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
| 138 | |
David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 139 | /** |
Fengguang Wu | 28bc44d | 2008-02-03 18:04:10 +0200 | [diff] [blame] | 140 | * do_invalidatepage - invalidate part or all of a page |
David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 141 | * @page: the page which is affected |
Lukas Czerner | d47992f | 2013-05-21 23:17:23 -0400 | [diff] [blame] | 142 | * @offset: start of the range to invalidate |
| 143 | * @length: length of the range to invalidate |
David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 144 | * |
| 145 | * do_invalidatepage() is called when all or part of the page has become |
| 146 | * invalidated by a truncate operation. |
| 147 | * |
| 148 | * do_invalidatepage() does not have to release all buffers, but it must |
| 149 | * ensure that no dirty buffer is left outside @offset and that no I/O |
| 150 | * is underway against any of the blocks which are outside the truncation |
| 151 | * point. Because the caller is about to free (and possibly reuse) those |
| 152 | * blocks on-disk. |
| 153 | */ |
Lukas Czerner | d47992f | 2013-05-21 23:17:23 -0400 | [diff] [blame] | 154 | void do_invalidatepage(struct page *page, unsigned int offset, |
| 155 | unsigned int length) |
David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 156 | { |
Lukas Czerner | d47992f | 2013-05-21 23:17:23 -0400 | [diff] [blame] | 157 | void (*invalidatepage)(struct page *, unsigned int, unsigned int); |
| 158 | |
David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 159 | invalidatepage = page->mapping->a_ops->invalidatepage; |
David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 160 | #ifdef CONFIG_BLOCK |
David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 161 | if (!invalidatepage) |
| 162 | invalidatepage = block_invalidatepage; |
David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 163 | #endif |
David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 164 | if (invalidatepage) |
Lukas Czerner | d47992f | 2013-05-21 23:17:23 -0400 | [diff] [blame] | 165 | (*invalidatepage)(page, offset, length); |
David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 166 | } |
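
/*
 * Illustrative only: a filesystem hooks into this via its
 * address_space_operations, e.g. (hypothetical myfs):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		...
 *		.invalidatepage	= myfs_invalidatepage,
 *	};
 *
 * If no ->invalidatepage is provided, block-based filesystems fall back
 * to block_invalidatepage() above.
 */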

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		loff_t holelen;

		holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_SHIFT,
				    holelen, 0);
	}

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(mapping, page);
	delete_from_page_cache(page);
	return 0;
}
| 237 | |
Wu Fengguang | 83f7866 | 2009-09-16 11:50:13 +0200 | [diff] [blame] | 238 | /* |
Andi Kleen | 2571873 | 2009-09-16 11:50:13 +0200 | [diff] [blame] | 239 | * Used to get rid of pages on hardware memory corruption. |
| 240 | */ |
| 241 | int generic_error_remove_page(struct address_space *mapping, struct page *page) |
| 242 | { |
| 243 | if (!mapping) |
| 244 | return -EINVAL; |
| 245 | /* |
| 246 | * Only punch for normal data pages for now. |
| 247 | * Handling other types like directories would need more auditing. |
| 248 | */ |
| 249 | if (!S_ISREG(mapping->host->i_mode)) |
| 250 | return -EIO; |
| 251 | return truncate_inode_page(mapping, page); |
| 252 | } |
| 253 | EXPORT_SYMBOL(generic_error_remove_page); |

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.  The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file, so we have to set 'end'
		 * to the highest possible pgoff_t; since the type is
		 * unsigned, we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		/*
		 * Pagevec array has exceptional entries and we may also fail
		 * to lock some pages. So we store pages that can be deleted
		 * in a new pagevec.
		 */
		struct pagevec locked_pvec;

		pagevec_init(&locked_pvec);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page))
				continue;

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			pagevec_add(&locked_pvec, page);
		}
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			truncate_cleanup_page(mapping, locked_pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &locked_pvec);
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			unlock_page(locked_pvec.pages[i]);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
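
/*
 * Worked example (illustrative, assuming 4K pages): truncating to
 * lstart = 1024 with lend = -1 gives partial_start = 1024, partial_end = 0,
 * start = 1 and end = (pgoff_t)-1.  Pages from index 1 onwards are removed
 * outright, while bytes 1024..4095 of page 0 are zeroed in place via the
 * partial_start handling above.
 */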

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
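
/*
 * Illustrative only (hypothetical myfs): the typical ->evict_inode pairing
 * looks like
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *		// free filesystem-private resources here
 *	}
 */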

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/*
				 * 'end' is in the middle of THP. Don't
				 * invalidate the page as the part outside of
				 * 'end' could be still useful.
				 */
				if (index > end) {
					unlock_page(page);
					continue;
				}
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
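
/*
 * Illustrative only: this is the workhorse behind best-effort cache
 * dropping; e.g. the POSIX_FADV_DONTNEED branch of fadvise64() invalidates
 * a written-back range roughly like
 *
 *	invalidate_mapping_pages(mapping, start_index, end_index);
 *
 * where dirty, locked or mapped pages are simply skipped rather than
 * synchronously written out.
 */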

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
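
/*
 * Note: ->launder_page gives the filesystem one last chance to write a
 * dirty page back before it is invalidated; network filesystems such as
 * NFS implement it so that invalidate_inode_pages2() does not throw away
 * uncommitted data.
 */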

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					    (loff_t)index << PAGE_SHIFT,
					    (loff_t)(1 + end - index)
							 << PAGE_SHIFT,
							 0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					    (loff_t)index << PAGE_SHIFT,
					    PAGE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating the radix
	 * tree.  We could invalidate page tables while invalidating each
	 * entry, however that would be expensive.  And doing range unmapping
	 * beforehand doesn't work either, as we have no cheap way to find
	 * out whether a radix tree entry got remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
				    (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
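
/*
 * Illustrative only (hypothetical myfs): a setattr implementation would
 * typically pair this with its own block truncation, e.g.
 *
 *	if (attr->ia_valid & ATTR_SIZE) {
 *		truncate_setsize(inode, attr->ia_size);
 *		myfs_truncate_blocks(inode, attr->ia_size);
 *	}
 *
 * with myfs_truncate_blocks() being the filesystem-specific step that
 * frees the on-disk blocks beyond the new size.
 */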

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size.  We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
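
/*
 * Illustrative only (hypothetical myfs): a fallocate(FALLOC_FL_PUNCH_HOLE)
 * handler would typically do
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	myfs_punch_hole_blocks(inode, offset, len);
 *
 * i.e. drop the affected pagecache first, then free the underlying blocks,
 * so writeback never targets blocks that are already gone.
 */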