/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   block_invalidatepage */

static int do_invalidatepage(struct page *page, unsigned long offset)
{
	int (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
	if (invalidatepage == NULL)
		invalidatepage = block_invalidatepage;
	return (*invalidatepage)(page, offset);
}

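/*
 * Illustrative sketch (not part of this file; "foofs" and
 * foofs_invalidatepage() are made-up names): a filesystem that attaches
 * private metadata to its pages reaches the do_invalidatepage() dispatch
 * above through its address_space_operations, while one that only uses
 * buffer_heads leaves the field NULL and gets the block_invalidatepage()
 * fallback:
 *
 *	static struct address_space_operations foofs_aops = {
 *		.readpage	= foofs_readpage,
 *		.writepage	= foofs_writepage,
 *		.invalidatepage	= foofs_invalidatepage,
 *	};
 */
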
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

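/*
 * Worked example for the zeroing above (illustrative, assuming
 * PAGE_CACHE_SIZE == 4096): truncating a file to lstart == 10000 leaves
 * partial == 10000 & 4095 == 1808, so bytes 1808..4095 of the page now
 * straddling EOF are cleared and flushed, and any fs-private metadata
 * from byte 1808 onwards is invalidated.
 */
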
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too.  So we re-check the dirtiness inside
 * ->tree_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page)) {
		write_unlock_irq(&mapping->tree_lock);
		return 0;
	}

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
}

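/*
 * Illustrative interleaving showing why the locked re-check above is
 * needed (a sketch; the __set_page_dirty functions take ->tree_lock too):
 *
 *	invalidator				dirtier
 *	-----------				-------
 *	try_to_release_page() succeeds
 *						marks the page dirty
 *	write_lock_irq(&mapping->tree_lock)
 *	PageDirty() is seen, page survives
 *
 * Without re-checking under ->tree_lock the invalidator could remove a
 * page that was dirtied an instant earlier, losing the new data.
 */
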
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Truncate the page cache at a set offset, removing the pages that are beyond
 * that offset (and zeroing out partial pages).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Called under (and serialised by) inode->i_sem.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	pagevec_init(&pvec, 0);
	next = start;
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}

EXPORT_SYMBOL(truncate_inode_pages);

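/*
 * Typical call site (a sketch, simplified from the generic vmtruncate()
 * path of this era): shrinking a file to 'offset' with i_sem held zaps
 * user mappings first, then drops the pagecache beyond the new EOF:
 *
 *	i_size_write(inode, offset);
 *	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, offset);
 */
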
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (TestSetPageLocked(page)) {
				next++;
				continue;
			}
			if (page->index > next)
				next = page->index;
			next++;
			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}

EXPORT_SYMBOL(invalidate_inode_pages);

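/*
 * Example caller (a sketch, along the lines of the POSIX_FADV_DONTNEED
 * handling in mm/fadvise.c of this era): drop clean, unused pagecache
 * over a byte range.  Because dirty, locked, mapped and writeback pages
 * are skipped, this is always safe - at worst it invalidates nothing:
 *
 *	start_index = offset >> PAGE_CACHE_SHIFT;
 *	end_index = (offset + len) >> PAGE_CACHE_SHIFT;
 *	if (end_index > start_index)
 *		invalidate_mapping_pages(mapping, start_index, end_index - 1);
 */
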
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;
			int was_dirty;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					    page_index << PAGE_CACHE_SHIFT,
					    (end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					    page_index << PAGE_CACHE_SHIFT,
					    PAGE_CACHE_SIZE, 0);
				}
			}
			was_dirty = test_clear_page_dirty(page);
			if (!invalidate_complete_page(mapping, page)) {
				if (was_dirty)
					set_page_dirty(page);
				ret = -EIO;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

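/*
 * Example use (a sketch; 'offset' and 'count' stand for the byte range of
 * a direct-IO write): O_DIRECT paths of this era shoot down any pagecache
 * covering the written range so later buffered reads cannot see stale
 * data, and propagate the -EIO if a page would not go away:
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			offset >> PAGE_CACHE_SHIFT,
 *			(offset + count - 1) >> PAGE_CACHE_SHIFT);
 */
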
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
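
/*
 * Example caller (a sketch): a network filesystem that notices the file
 * changed on the server wants every cached page gone, mapped or not, and
 * needs to know when that failed.  NFS-style revalidation does roughly:
 *
 *	error = invalidate_inode_pages2(inode->i_mapping);
 *	(a non-zero 'error' means stale pages may still be cached)
 */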