/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include "filemap.h"
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/uaccess.h>
#include <asm/mman.h>

static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs);

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock		(vmtruncate)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->mmap_sem
 *    ->i_mutex			(msync)
 *
 *  ->i_mutex
 *    ->i_alloc_sem		(various)
 *
 *  ->inode_lock
 *    ->sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock		(proc_pid_lookup)
 */

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe. The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	pagecache_acct(-1);
}

void remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));

	write_lock_irq(&mapping->tree_lock);
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
}

static int sync_page(void *word)
{
	struct address_space *mapping;
	struct page *page;

	page = container_of((unsigned long *)word, struct page, flags);

	/*
	 * page_mapping() is being called without PG_locked held.
	 * Some knowledge of the state and use of the page is used to
	 * reduce the requirements down to a memory barrier.
	 * The danger here is of a stale page_mapping() return value
	 * indicating a struct address_space different from the one it's
	 * associated with when it is associated with one.
	 * After smp_mb(), it's either the correct page_mapping() for
	 * the page, or an old page_mapping() and the page's own
	 * page_mapping() has gone NULL.
	 * The ->sync_page() address_space operation must tolerate
	 * page_mapping() going NULL. By an amazing coincidence,
	 * this comes about because none of the users of the page
	 * in the ->sync_page() methods make essential use of the
	 * page_mapping(), merely passing the page down to the backing
	 * device's unplug functions when it's non-NULL, which in turn
	 * ignore it for all cases but swap, where only page_private(page) is
	 * of interest. When page_mapping() does go NULL, the entire
	 * call stack gracefully ignores the page and returns.
	 * -- wli
	 */
	smp_mb();
	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		mapping->a_ops->sync_page(page);
	io_schedule();
	return 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * wait_on_page_writeback_range - wait for writeback to complete
 * @mapping: target address_space
 * @start: beginning page index
 * @end: ending page index
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
int wait_on_page_writeback_range(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}

/**
 * sync_page_range - write and wait on all pages in the passed range
 * @inode: target inode
 * @mapping: target address_space
 * @pos: beginning offset in pages to write
 * @count: number of bytes to write
 *
 * Write and wait upon all the pages in the passed range. This is a "data
 * integrity" operation. It waits upon in-flight writeout before starting and
 * waiting upon new writeout. If there was an IO error, return it.
 *
 * We need to re-take i_mutex during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0) {
		mutex_lock(&inode->i_mutex);
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		mutex_unlock(&inode->i_mutex);
	}
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range);

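/*
 * Illustrative sketch (not part of the original file, compiled out): a
 * typical caller is an O_SYNC write path, which dirties pagecache and then
 * hands the written byte range to sync_page_range(). The helper below and
 * its name are hypothetical.
 */
#if 0	/* example only */
static ssize_t example_sync_after_write(struct file *file, loff_t pos,
					ssize_t written)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int err;

	if (written <= 0)
		return written;

	/* Write out the range, sync the metadata, wait for completion. */
	err = sync_page_range(inode, mapping, pos, written);
	return err ? err : written;
}
#endif
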
/**
 * sync_page_range_nolock
 * @inode: target inode
 * @mapping: target address_space
 * @pos: beginning offset in pages to write
 * @count: number of bytes to write
 *
 * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0)
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return wait_on_page_writeback_range(mapping, 0,
				(i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = wait_on_page_writeback_range(mapping,
						lstart >> PAGE_CACHE_SHIFT,
						lend >> PAGE_CACHE_SHIFT);
			if (!err)
				err = err2;
		}
	}
	return err;
}

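/*
 * Illustrative sketch (not part of the original file, compiled out):
 * because @lend is inclusive, passing -1 (all ones) writes out and waits
 * on every byte of the mapping, per the kernel-doc above. Hypothetical
 * helper name.
 */
#if 0	/* example only */
static int example_flush_whole_file(struct address_space *mapping)
{
	/* lstart = 0, lend = -1 covers from the first byte to EOF. */
	return filemap_write_and_wait_range(mapping, 0, -1);
}
#endif
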
/**
 * add_to_page_cache - add newly allocated pagecache pages
 * @page: page to add
 * @mapping: the page's address_space
 * @offset: page index
 * @gfp_mask: page allocation mode
 *
 * This function is used to add newly allocated pagecache pages;
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU. The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			page->mapping = mapping;
			page->index = offset;
			mapping->nrpages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
EXPORT_SYMBOL(add_to_page_cache);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add(page);
	return ret;
}

#ifdef CONFIG_NUMA
struct page *page_cache_alloc(struct address_space *x)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
	}
	return alloc_pages(mapping_gfp_mask(x), 0);
}
EXPORT_SYMBOL(page_cache_alloc);

struct page *page_cache_alloc_cold(struct address_space *x)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
	}
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
EXPORT_SYMBOL(page_cache_alloc_cold);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

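/*
 * Illustrative sketch (not part of the original file, compiled out): the
 * usual pairing is lock_page()/unlock_page() around page state that must
 * not change under the caller, e.g. checking that a page is still attached
 * to a mapping. Hypothetical helper name.
 */
#if 0	/* example only */
static int example_check_page_attached(struct page *page,
					struct address_space *mapping)
{
	int attached;

	lock_page(page);		/* may sleep in __lock_page() */
	attached = (page->mapping == mapping);
	unlock_page(page);		/* wakes waiters on PG_locked */
	return attached;
}
#endif
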
/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 *
 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait. However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * A rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);
	read_unlock_irq(&mapping->tree_lock);
	return page;
}
EXPORT_SYMBOL(find_get_page);

/**
 * find_trylock_page - find and lock a page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Same as find_get_page(), but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page && TestSetPageLocked(page))
		page = NULL;
	read_unlock_irq(&mapping->tree_lock);
	return page;
}
EXPORT_SYMBOL(find_trylock_page);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
				unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		page_cache_get(page);
		if (TestSetPageLocked(page)) {
			read_unlock_irq(&mapping->tree_lock);
			__lock_page(page);
			read_lock_irq(&mapping->tree_lock);

			/* Has the page been truncated while we slept? */
			if (unlikely(page->mapping != mapping ||
				     page->index != offset)) {
				unlock_page(page);
				page_cache_release(page);
				goto repeat;
			}
		}
	}
	read_unlock_irq(&mapping->tree_lock);
	return page;
}
EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache. If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list. The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		unsigned long index, gfp_t gfp_mask)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = alloc_page(gfp_mask);
			if (!cached_page)
				return NULL;
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, gfp_mask);
		if (!err) {
			page = cached_page;
			cached_page = NULL;
		} else if (err == -EEXIST)
			goto repeat;
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}
EXPORT_SYMBOL(find_or_create_page);

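/*
 * Illustrative sketch (not part of the original file, compiled out): a
 * filesystem that wants page @index locked in the pagecache, allocating
 * it if absent, might do the following. Hypothetical helper name.
 */
#if 0	/* example only */
static struct page *example_get_locked_page(struct address_space *mapping,
					unsigned long index)
{
	struct page *page;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return NULL;	/* memory exhaustion */
	/*
	 * Per the kernel-doc above, the page comes back locked with an
	 * elevated refcount; the caller must eventually unlock_page()
	 * and page_cache_release() it.
	 */
	return page;
}
#endif
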
/**
 * find_get_pages - gang pagecache lookup
 * @mapping: The address_space to search
 * @start: The starting page index
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping. The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, start, nr_pages);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping: The address_space to search
 * @index: The starting page index
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, index, nr_pages);
	for (i = 0; i < ret; i++) {
		if (pages[i]->mapping == NULL || pages[i]->index != index)
			break;

		page_cache_get(pages[i]);
		index++;
	}
	read_unlock_irq(&mapping->tree_lock);
	return i;
}

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping: the address_space to search
 * @index: the starting page index
 * @tag: the tag index
 * @nr_pages: the maximum number of pages
 * @pages: where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag. We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
				(void **)pages, *index, nr_pages, tag);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	if (ret)
		*index = pages[ret - 1]->index + 1;
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

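/*
 * Illustrative sketch (not part of the original file, compiled out):
 * walking all dirty pages of a mapping with find_get_pages_tag(). Since
 * @index is advanced past the last returned page, the loop terminates
 * when a lookup comes back empty. Hypothetical helper name.
 */
#if 0	/* example only */
static void example_walk_dirty_pages(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned nr, i;

	while ((nr = find_get_pages_tag(mapping, &index,
			PAGECACHE_TAG_DIRTY, 16, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ... inspect pages[i] here ... */
			page_cache_release(pages[i]);	/* drop lookup ref */
		}
	}
}
#endif
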
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
	struct page *page = find_get_page(mapping, index);
	gfp_t gfp_mask;

	if (page) {
		if (!TestSetPageLocked(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
	page = alloc_pages(gfp_mask, 0);
	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);

/**
 * do_generic_mapping_read - generic file read routine
 * @mapping: address_space to be read
 * @_ra: file's readahead state
 * @filp: the file to read
 * @ppos: current file position
 * @desc: read_descriptor
 * @actor: read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.
 * It may be NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
			     struct file_ra_state *_ra,
			     struct file *filp,
			     loff_t *ppos,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index;
	unsigned long end_index;
	unsigned long offset;
	unsigned long last_index;
	unsigned long next_index;
	unsigned long prev_index;
	loff_t isize;
	struct page *cached_page;
	int error;
	struct file_ra_state ra = *_ra;

	cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;
	next_index = index;
	prev_index = ra.prev_page;
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

		cond_resched();
		if (index == next_index)
			next_index = page_cache_readahead(mapping, &ra, filp,
					index, last_index - index);

find_page:
		page = find_get_page(mapping, index);
		if (unlikely(page == NULL)) {
			handle_ra_miss(mapping, &ra, index);
			goto no_cached_page;
		}
		if (!PageUptodate(page))
			goto page_not_up_to_date;
page_ok:

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When (part of) the same page is read multiple times
		 * in succession, only mark it as accessed the first time.
		 */
		if (prev_index != index)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		lock_page(page);

		/* Did it get unhashed before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			lock_page(page);
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_inode_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		/*
		 * i_size must be checked after we have done ->readpage.
		 *
		 * Checking i_size after the readpage allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */
		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;
		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page) {
				desc->error = -ENOMEM;
				goto out;
			}
		}
		error = add_to_page_cache_lru(cached_page, mapping,
						index, GFP_KERNEL);
		if (error) {
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		page = cached_page;
		cached_page = NULL;
		goto readpage;
	}

out:
	*_ra = ra;

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (cached_page)
		page_cache_release(cached_page);
	if (filp)
		file_accessed(filp);
}
EXPORT_SYMBOL(do_generic_mapping_read);

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/**
 * __generic_file_aio_read - generic filesystem read routine
 * @iocb: kernel I/O control block
 * @iov: io vector request
 * @nr_segs: number of segments in the iovec
 * @ppos: current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t pos = *ppos, size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = generic_file_direct_IO(READ, iocb,
						iov, pos, nr_segs);
			if (retval > 0 && !is_sync_kiocb(iocb))
				retval = -EIOCBQUEUED;
			if (retval > 0)
				*ppos = pos + retval;
		}
		file_accessed(filp);
		goto out;
	}

	retval = 0;
	if (count) {
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp,ppos,&desc,file_read_actor);
			retval += desc.written;
			if (desc.error) {
				retval = retval ?: desc.error;
				break;
			}
		}
	}
out:
	return retval;
}
EXPORT_SYMBOL(__generic_file_aio_read);

ssize_t
generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };

	BUG_ON(iocb->ki_pos != pos);
	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}
EXPORT_SYMBOL(generic_file_aio_read);

ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_read);

int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	ssize_t written;
	unsigned long count = desc->count;
	struct file *file = desc->arg.data;

	if (size > count)
		size = count;

	written = file->f_op->sendpage(file, page, offset,
					size, &file->f_pos, size<count);
	if (written < 0) {
		desc->error = written;
		written = 0;
	}
	desc->count = count - written;
	desc->written += written;
	return written;
}

ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_generic_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
EXPORT_SYMBOL(generic_file_sendfile);

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     unsigned long index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index,
					max_sane_readahead(nr));
	return 0;
}

asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			unsigned long start = offset >> PAGE_CACHE_SHIFT;
			unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}

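/*
 * Illustrative note (not part of the original file): userspace reaches
 * sys_readahead() above via the readahead(2) syscall, e.g.
 *
 *	ssize_t r = readahead(fd, 0, st.st_size);
 *
 * which schedules reads to populate the pagecache for the whole file
 * without copying any data to userspace.
 */
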
#ifdef CONFIG_MMU
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file: file to read
 * @offset: page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/**
 * filemap_nopage - read in file data for page fault handling
 * @area: the applicable vm_area
 * @address: target address to read in
 * @type: returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
 *
 * filemap_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
struct page *filemap_nopage(struct vm_area_struct *area,
				unsigned long address, int *type)
{
	int error;
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff;
	int did_readaround = 0, majmin = VM_FAULT_MINOR;

	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;

retry_all:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		goto outside_data_content;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(area))
		goto no_cached_page;

	/*
	 * The readahead code wants to be told about each and every page
	 * so it can build and shrink its windows appropriately
	 *
	 * For sequential accesses, we use the generic readahead logic.
	 */
	if (VM_SequentialReadHint(area))
		page_cache_readahead(mapping, ra, file, pgoff, 1);

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		unsigned long ra_pages;

		if (VM_SequentialReadHint(area)) {
			handle_ra_miss(mapping, ra, pgoff);
			goto no_cached_page;
		}
		ra->mmap_miss++;

		/*
		 * Do we miss much more than hit in this file? If so,
		 * stop bothering with read-ahead. It will only hurt.
		 */
		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
			goto no_cached_page;

		/*
		 * To keep the pgmajfault counter straight, we need to
		 * check did_readaround, as this is an inner loop.
		 */
		if (!did_readaround) {
			majmin = VM_FAULT_MAJOR;
			inc_page_state(pgmajfault);
		}
		did_readaround = 1;
		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
		if (ra_pages) {
			pgoff_t start = 0;

			if (pgoff > ra_pages / 2)
				start = pgoff - ra_pages / 2;
			do_page_cache_readahead(mapping, file, start, ra_pages);
		}
		page = find_get_page(mapping, pgoff);
		if (!page)
			goto no_cached_page;
	}

	if (!did_readaround)
		ra->mmap_hit++;

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page))
		goto page_not_uptodate;

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	if (type)
		*type = majmin;
	return page;

outside_data_content:
	/*
	 * An external ptracer can access pages that normally aren't
	 * accessible..
	 */
	if (area->vm_mm == current->mm)
		return NULL;
	/* Fall through to the non-read-ahead case */
no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, pgoff);
	grab_swap_token();

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return NOPAGE_OOM;
	return NULL;

page_not_uptodate:
	if (!did_readaround) {
		majmin = VM_FAULT_MAJOR;
		inc_page_state(pgmajfault);
	}
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
	page_cache_release(page);
	return NULL;
}
EXPORT_SYMBOL(filemap_nopage);
1529
1530static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
1531 int nonblock)
1532{
1533 struct address_space *mapping = file->f_mapping;
1534 struct page *page;
1535 int error;
1536
1537 /*
1538 * Do we have something in the page cache already?
1539 */
1540retry_find:
1541 page = find_get_page(mapping, pgoff);
1542 if (!page) {
1543 if (nonblock)
1544 return NULL;
1545 goto no_cached_page;
1546 }
1547
1548 /*
1549 * Ok, found a page in the page cache, now we need to check
1550 * that it's up-to-date.
1551 */
Jeff Moyerd3457342005-04-16 15:24:05 -07001552 if (!PageUptodate(page)) {
1553 if (nonblock) {
1554 page_cache_release(page);
1555 return NULL;
1556 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 goto page_not_uptodate;
Jeff Moyerd3457342005-04-16 15:24:05 -07001558 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
1560success:
1561 /*
1562 * Found the page and have a reference on it.
1563 */
1564 mark_page_accessed(page);
1565 return page;
1566
1567no_cached_page:
1568 error = page_cache_read(file, pgoff);
1569
1570 /*
1571 * The page we want has now been added to the page cache.
1572 * In the unlikely event that someone removed it in the
1573 * meantime, we'll just come back here and read it again.
1574 */
1575 if (error >= 0)
1576 goto retry_find;
1577
1578 /*
1579 * An error return from page_cache_read can result if the
1580 * system is low on memory, or a problem occurs while trying
1581 * to schedule I/O.
1582 */
1583 return NULL;
1584
1585page_not_uptodate:
1586 lock_page(page);
1587
1588 /* Did it get unhashed while we waited for it? */
1589 if (!page->mapping) {
1590 unlock_page(page);
1591 goto err;
1592 }
1593
1594 /* Did somebody else get it up-to-date? */
1595 if (PageUptodate(page)) {
1596 unlock_page(page);
1597 goto success;
1598 }
1599
Zach Brown994fc28c2005-12-15 14:28:17 -08001600 error = mapping->a_ops->readpage(file, page);
1601 if (!error) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 wait_on_page_locked(page);
1603 if (PageUptodate(page))
1604 goto success;
Zach Brown994fc28c2005-12-15 14:28:17 -08001605 } else if (error == AOP_TRUNCATED_PAGE) {
1606 page_cache_release(page);
1607 goto retry_find;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 }
1609
1610 /*
1611 * Umm, take care of errors if the page isn't up-to-date.
1612 * Try to re-read it _once_. We do this synchronously,
1613 * because there really aren't any performance issues here
1614 * and we need to check for errors.
1615 */
1616 lock_page(page);
1617
1618 /* Somebody truncated the page on us? */
1619 if (!page->mapping) {
1620 unlock_page(page);
1621 goto err;
1622 }
1623 /* Somebody else successfully read it in? */
1624 if (PageUptodate(page)) {
1625 unlock_page(page);
1626 goto success;
1627 }
1628
1629 ClearPageError(page);
Zach Brown994fc28c2005-12-15 14:28:17 -08001630 error = mapping->a_ops->readpage(file, page);
1631 if (!error) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 wait_on_page_locked(page);
1633 if (PageUptodate(page))
1634 goto success;
Zach Brown994fc28c2005-12-15 14:28:17 -08001635 } else if (error == AOP_TRUNCATED_PAGE) {
1636 page_cache_release(page);
1637 goto retry_find;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 }
1639
1640 /*
1641 * Things didn't work out. Return zero to tell the
1642 * mm layer so, possibly freeing the page cache page first.
1643 */
1644err:
1645 page_cache_release(page);
1646
1647 return NULL;
1648}
1649
1650int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
1651 unsigned long len, pgprot_t prot, unsigned long pgoff,
1652 int nonblock)
1653{
1654 struct file *file = vma->vm_file;
1655 struct address_space *mapping = file->f_mapping;
1656 struct inode *inode = mapping->host;
1657 unsigned long size;
1658 struct mm_struct *mm = vma->vm_mm;
1659 struct page *page;
1660 int err;
1661
1662 if (!nonblock)
1663 force_page_cache_readahead(mapping, vma->vm_file,
1664 pgoff, len >> PAGE_CACHE_SHIFT);
1665
1666repeat:
1667 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1668 if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
1669 return -EINVAL;
1670
1671 page = filemap_getpage(file, pgoff, nonblock);
Paolo 'Blaisorblade' Giarrussod44ed4f2005-09-03 15:54:55 -07001672
1673 /* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
1674 * done in shmem_populate calling shmem_getpage */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 if (!page && !nonblock)
1676 return -ENOMEM;
Paolo 'Blaisorblade' Giarrussod44ed4f2005-09-03 15:54:55 -07001677
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 if (page) {
1679 err = install_page(mm, vma, addr, page, prot);
1680 if (err) {
1681 page_cache_release(page);
1682 return err;
1683 }
Hugh Dickins65500d22005-10-29 18:15:59 -07001684 } else if (vma->vm_flags & VM_NONLINEAR) {
Paolo 'Blaisorblade' Giarrussod44ed4f2005-09-03 15:54:55 -07001685 /* No page was found just because we can't read it in now (being
1686 * here implies nonblock != 0), but the page may exist, so set
1687 * the PTE to fault it in later. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 err = install_file_pte(mm, vma, addr, pgoff, prot);
1689 if (err)
1690 return err;
1691 }
1692
1693 len -= PAGE_SIZE;
1694 addr += PAGE_SIZE;
1695 pgoff++;
1696 if (len)
1697 goto repeat;
1698
1699 return 0;
1700}
Nikita Danilovb1459462005-10-29 18:17:02 -07001701EXPORT_SYMBOL(filemap_populate);
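
/*
 * Usage note (an illustrative sketch, not part of the original file):
 * ->populate is normally reached via sys_remap_file_pages() when user
 * space rearranges pages inside a MAP_SHARED mapping.  The file name
 * below is hypothetical; prot must be 0 for remap_file_pages():
 *
 *	long pgsz = sysconf(_SC_PAGESIZE);
 *	int fd = open("/tmp/data", O_RDWR);
 *	void *p = mmap(NULL, 4 * pgsz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// Map file page 3 at the first page of the window; the kernel
 *	// marks the VMA VM_NONLINEAR and calls vma->vm_ops->populate().
 *	remap_file_pages(p, pgsz, 0, 3, 0);
 */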

struct vm_operations_struct generic_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
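
/*
 * Example (a hedged sketch, not from this file): a filesystem opts into
 * these helpers from its file_operations; "myfs" is a hypothetical
 * name.  A filesystem whose pages can never be written back (no
 * ->writepage) would point .mmap at generic_file_readonly_mmap instead:
 *
 *	struct file_operations myfs_file_operations = {
 *		.read	= generic_file_read,
 *		.mmap	= generic_file_mmap,
 *	};
 */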

static inline struct page *__read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page)
				return ERR_PTR(-ENOMEM);
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err < 0) {
			/* Presumably ENOMEM for radix tree node */
			page_cache_release(cached_page);
			return ERR_PTR(err);
		}
		page = cached_page;
		cached_page = NULL;
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping: the page's address_space
 * @index: the page index
 * @filler: function to perform the read
 * @data: first argument passed to @filler
 *
 * Read into the page cache. If a page already exists, and
 * PageUptodate() is not set, try to fill the page.
 */
struct page *read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data);
	if (IS_ERR(page))
		goto out;
	mark_page_accessed(page);
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		page = ERR_PTR(err);
	}
 out:
	return page;
}
EXPORT_SYMBOL(read_cache_page);
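
/*
 * Example (an illustrative sketch): a typical filler just wraps the
 * mapping's ->readpage, passing the owning file (or NULL) as @data.
 * Note that the returned page may not yet be up-to-date, so callers
 * wait and re-check.  The myfs_filler name is hypothetical:
 *
 *	static int myfs_filler(void *data, struct page *page)
 *	{
 *		struct file *file = data;
 *
 *		return page->mapping->a_ops->readpage(file, page);
 *	}
 *
 *	page = read_cache_page(mapping, index, myfs_filler, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	wait_on_page_locked(page);
 *	if (!PageUptodate(page)) {
 *		page_cache_release(page);
 *		return -EIO;
 *	}
 */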

/*
 * If the page was newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec.  This function is specifically for
 * generic_file_write().
 */
static inline struct page *
__grab_cache_page(struct address_space *mapping, unsigned long index,
			struct page **cached_page, struct pagevec *lru_pvec)
{
	int err;
	struct page *page;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!*cached_page) {
			*cached_page = page_cache_alloc(mapping);
			if (!*cached_page)
				return NULL;
		}
		err = add_to_page_cache(*cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err == 0) {
			page = *cached_page;
			page_cache_get(page);
			if (!pagevec_add(lru_pvec, page))
				__pagevec_lru_add(lru_pvec);
			*cached_page = NULL;
		}
	}
	return page;
}

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int remove_suid(struct dentry *dentry)
{
	mode_t mode = dentry->d_inode->i_mode;
	int kill = 0;
	int result = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID))) {
		struct iattr newattrs;

		newattrs.ia_valid = ATTR_FORCE | kill;
		result = notify_change(dentry, &newattrs);
	}
	return result;
}
EXPORT_SYMBOL(remove_suid);
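
/*
 * Usage note (a hedged sketch, not from this file): write paths that
 * bypass the generic helpers are expected to drop setuid/setgid bits
 * themselves before modifying the file, typically under i_mutex.
 * myfs_do_write() is a hypothetical stand-in for the filesystem's own
 * write-out logic:
 *
 *	mutex_lock(&inode->i_mutex);
 *	err = remove_suid(file->f_dentry);
 *	if (!err)
 *		err = myfs_do_write(file, buf, count, ppos);
 *	mutex_unlock(&inode->i_mutex);
 */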

/*
 * Copy @bytes bytes from the user iovec into the kernel buffer at
 * @vaddr.  Returns the number of bytes actually copied; if a user fault
 * occurs mid-copy, the unwritten remainder of the destination is
 * zeroed, mirroring __copy_from_user() semantics.
 */
size_t
__filemap_copy_from_user_iovec(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left)) {
			/* zero the rest of the target like __copy_from_user */
			if (bytes)
				memset(vaddr, 0, bytes);
			break;
		}
	}
	return copied - left;
}

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or number of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus' frestrict idea will clean these up nicely..
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);
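
/*
 * Example (a hedged sketch): a write path that does not go through
 * __generic_file_aio_write_nolock() performs the same dance itself.
 * Assumed context: pos and count are the caller's write position and
 * length, and the target is a regular file (isblk == 0):
 *
 *	err = generic_write_checks(file, &pos, &count, 0);
 *	if (err)
 *		return err;
 *	if (count == 0)
 *		return 0;
 *	// count may have been trimmed (rlimit, LFS, s_maxbytes),
 *	// and pos moved to EOF for O_APPEND.
 */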

ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
	if (written > 0) {
		loff_t end = pos + written;
		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, end);
			mark_inode_dirty(inode);
		}
		*ppos = end;
	}

	/*
	 * Sync the fs metadata but not the minor inode changes and
	 * of course not the data as we did direct DMA for the IO.
	 * i_mutex is held, which protects generic_osync_inode() from
	 * livelocking.
	 */
	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		if (err < 0)
			written = err;
	}
	if (written == count && !is_sync_kiocb(iocb))
		written = -EIOCBQUEUED;
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	struct page *cached_page = NULL;
	size_t bytes;
	struct pagevec lru_pvec;
	const struct iovec *cur_iov = iov; /* current iovec */
	size_t iov_base = 0;		   /* offset in the current iovec */
	char __user *buf;

	pagevec_init(&lru_pvec, 0);

	/*
	 * handle partial DIO write.  Adjust cur_iov if needed.
	 */
	if (likely(nr_segs == 1))
		buf = iov->iov_base + written;
	else {
		filemap_set_next_iovec(&cur_iov, &iov_base, written);
		buf = cur_iov->iov_base + iov_base;
	}

	do {
		unsigned long index;
		unsigned long offset;
		unsigned long maxlen;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		maxlen = cur_iov->iov_len - iov_base;
		if (maxlen > bytes)
			maxlen = bytes;
		fault_in_pages_readable(buf, maxlen);

		page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);
		if (!page) {
			status = -ENOMEM;
			break;
		}

		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status)) {
			loff_t isize = i_size_read(inode);

			if (status != AOP_TRUNCATED_PAGE)
				unlock_page(page);
			page_cache_release(page);
			if (status == AOP_TRUNCATED_PAGE)
				continue;
			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again.
			 */
			if (pos + bytes > isize)
				vmtruncate(inode, isize);
			break;
		}
		if (likely(nr_segs == 1))
			copied = filemap_copy_from_user(page, offset,
							buf, bytes);
		else
			copied = filemap_copy_from_user_iovec(page, offset,
						cur_iov, iov_base, bytes);
		flush_dcache_page(page);
		status = a_ops->commit_write(file, page, offset, offset+bytes);
		if (status == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			continue;
		}
		if (likely(copied > 0)) {
			if (!status)
				status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
				if (unlikely(nr_segs > 1)) {
					filemap_set_next_iovec(&cur_iov,
							&iov_base, status);
					if (count)
						buf = cur_iov->iov_base +
							iov_base;
				} else {
					iov_base += status;
				}
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
		if (status < 0)
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
	*ppos = pos;

	if (cached_page)
		page_cache_release(cached_page);

	/*
	 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
	 */
	if (likely(status >= 0)) {
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
			if (!a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(inode, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}

	/*
	 * If we get here for O_DIRECT writes then we must have fallen through
	 * to buffered writes (block instantiation inside i_size).  So we sync
	 * the file data here, to try to honour O_DIRECT expectations.
	 */
	if (unlikely(file->f_flags & O_DIRECT) && written)
		status = filemap_write_and_wait(mapping);

	pagevec_lru_add(&lru_pvec);
	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
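
/*
 * Example (a hedged sketch): a filesystem that needs its own work
 * around the generic write loop (for instance, allocation under a
 * cluster lock) can drive generic_file_buffered_write() directly from
 * its ->aio_write.  All myfs_* names are hypothetical; the final
 * argument is 0 because no direct-IO bytes were written beforehand:
 *
 *	ssize_t myfs_aio_write(struct kiocb *iocb, const char __user *buf,
 *			       size_t count, loff_t pos)
 *	{
 *		struct iovec iov = { .iov_base = (void __user *)buf,
 *				     .iov_len = count };
 *		...
 *		err = generic_write_checks(file, &pos, &count, 0);
 *		...
 *		return generic_file_buffered_write(iocb, &iov, 1, pos,
 *						   &iocb->ki_pos, count, 0);
 *	}
 */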

static ssize_t
__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode *inode = mapping->host;
	unsigned long seg;
	loff_t pos;
	ssize_t written;
	ssize_t err;

	ocount = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *ppos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	file_update_time(file);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		written = generic_file_direct_write(iocb, iov,
				&nr_segs, pos, ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
	}

	written = generic_file_buffered_write(iocb, iov, nr_segs,
			pos, ppos, count, written);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(generic_file_aio_write_nolock);

ssize_t
generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	loff_t pos = *ppos;

	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err;

		err = sync_page_range_nolock(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

static ssize_t
__generic_file_write_nolock(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

ssize_t
generic_file_write_nolock(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_nolock);
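
/*
 * Example (a hedged sketch): a filesystem that serializes writers with
 * its own lock rather than i_mutex can call the _nolock variant
 * directly.  myfs_lock()/myfs_unlock() are hypothetical:
 *
 *	myfs_lock(inode);
 *	ret = generic_file_write_nolock(file, &local_iov, 1, ppos);
 *	myfs_unlock(inode);
 */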

ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
			       size_t count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
					.iov_len = count };

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
						&iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
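
/*
 * Example (a hedged sketch): most disk filesystems simply plug the
 * generic entry points into their file_operations and let this file do
 * the locking and O_SYNC handling; "myfs" is a hypothetical name, but
 * ext2's file_operations of this era look much the same:
 *
 *	struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= generic_file_read,
 *		.write		= generic_file_write,
 *		.aio_read	= generic_file_aio_read,
 *		.aio_write	= generic_file_aio_write,
 *		.readv		= generic_file_readv,
 *		.writev		= generic_file_writev,
 *		.mmap		= generic_file_mmap,
 *	};
 */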

ssize_t generic_file_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
					.iov_len = count };

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write);

ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
			unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_readv);

ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
			unsigned long nr_segs, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err;

		err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_writev);

/*
 * Called under i_mutex for writes to S_ISREG files.  Returns -EIO if
 * something went wrong during pagecache shootdown.
 */
static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t retval;
	size_t write_len = 0;

	/*
	 * If it's a write, unmap all mmappings of the file up-front.  This
	 * will cause any pte dirty bits to be propagated into the pageframes
	 * for the subsequent filemap_write_and_wait().
	 */
	if (rw == WRITE) {
		write_len = iov_length(iov, nr_segs);
		if (mapping_mapped(mapping))
			unmap_mapping_range(mapping, offset, write_len, 0);
	}

	retval = filemap_write_and_wait(mapping);
	if (retval == 0) {
		retval = mapping->a_ops->direct_IO(rw, iocb, iov,
						offset, nr_segs);
		if (rw == WRITE && mapping->nrpages) {
			pgoff_t end = (offset + write_len - 1)
						>> PAGE_CACHE_SHIFT;
			int err = invalidate_inode_pages2_range(mapping,
					offset >> PAGE_CACHE_SHIFT, end);
			if (err)
				retval = err;
		}
	}
	return retval;
}
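
/*
 * Implementation note (a hedged sketch): the ->direct_IO method called
 * above is typically a thin wrapper around blockdev_direct_IO() from
 * fs/direct-io.c.  A block-mapped filesystem of this era might provide
 * something like the following; the myfs_* names and the get_block
 * routine are hypothetical:
 *
 *	static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
 *			const struct iovec *iov, loff_t offset,
 *			unsigned long nr_segs)
 *	{
 *		struct file *file = iocb->ki_filp;
 *		struct inode *inode = file->f_mapping->host;
 *
 *		return blockdev_direct_IO(rw, iocb, inode,
 *				inode->i_sb->s_bdev, iov, offset,
 *				nr_segs, myfs_get_block, NULL);
 *	}
 */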