/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
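
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that needs a stable view of a buffer must take the lock itself, since
 * __wait_on_buffer() alone does not stop someone else relocking it:
 *
 *	lock_buffer(bh);		// sleeps in __lock_buffer() if contended
 *	if (buffer_uptodate(bh))
 *		examine_contents(bh);	// hypothetical helper
 *	unlock_buffer(bh);		// wakes waiters on BH_Lock
 */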

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
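
/*
 * Usage sketch (illustrative, not part of the original file): a snapshot
 * or backup path would bracket its work with the pair above; freeze_bdev()
 * returns the frozen superblock (possibly NULL), which is handed back to
 * thaw_bdev():
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	take_device_snapshot(bdev);		// hypothetical helper
 *	thaw_bdev(bdev, sb);
 */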

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted, thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the new disk inserted with the data belonging to
   the old, now-corrupted disk). Also for the ramdisk, the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies the device driver
   issuing a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

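/*
 * Usage sketch (illustrative, not part of the original file): one plausible
 * sequence for a driver about to release a device, per the comment above -
 * flush first, then drop the clean, unpinned pagecache:
 *
 *	sync_blockdev(bdev);
 *	invalidate_bdev(bdev);
 */
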
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_online_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, 0, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
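
/*
 * Usage sketch (illustrative, not part of the original file): the O_SYNC
 * pattern described above - queue each write as the buffer is dirtied,
 * then wait only for the already-submitted I/O:
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);		// queue the write immediately
 *	...
 *	err = osync_buffers_list(lock, list);	// waits, queues nothing new
 */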

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
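
/*
 * Usage sketch (illustrative, not part of the original file): a simple
 * fsync() built on the helper above, in the style of ext2:
 *
 *	int example_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *
 *		// indirect blocks etc. live on ->private_list
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */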

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}
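
/*
 * Worked example (illustrative, not part of the original file): with 4KB
 * blocks, if block 1000 was just written and was flagged buffer_boundary(),
 * its indirect block probably sits at block 1001, so writeback would issue:
 *
 *	write_boundary_block(bdev, 1000, 4096);	// probes block 1001
 */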

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
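
/*
 * Usage sketch (illustrative, not part of the original file): buffer-backed
 * filesystems typically wire this up as their set_page_dirty method:
 *
 *	static const struct address_space_operations example_aops = {
 *		.set_page_dirty	= __set_page_dirty_buffers,
 *		// .readpage, .writepage, ...
 *	};
 */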

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		list_del_init(&bh->b_assoc_buffers);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}
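
/*
 * Illustration (not part of the original file): sync_mapping_buffers()
 * above is the entry point to this routine, and the call amounts to:
 *
 *	fsync_buffers_list(&buffer_mapping->private_lock,
 *			   &mapping->private_list);
 *
 * matching the locking rule documented earlier: private_list is protected
 * by the *backing* mapping's private_lock.
 */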

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
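
/*
 * Worked example (illustrative, not part of the original file): on a 4KB
 * page with size = 1024 the loop above visits offsets 3072, 2048, 1024, 0,
 * prepending each buffer_head, so the returned head maps offset 0.  A
 * caller that must not fail passes retry=1:
 *
 *	struct buffer_head *head = alloc_page_buffers(page, 1024, 1);
 */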

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__FUNCTION__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}
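
/*
 * Worked example (illustrative, not part of the original file): with 4KB
 * pages and size = 1024, the loop above yields sizebits = 2 (1024 << 2 ==
 * PAGE_SIZE), i.e. four blocks per page.  Block 1001 then maps to
 * pagecache index 1001 >> 2 = 250 and is rounded down to block
 * 250 << 2 = 1000, the first block of that page.
 */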

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

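/*
 * Illustration (not part of the original file): with four 1KB buffers on
 * a 4KB page, dirtying one buffer dirties the page but not its siblings:
 *
 *	mark_buffer_dirty(bh);	// bh dirty, page dirty, radix tree tagged
 *	// the other three buffers stay clean, so writepage can write
 *	// only the 1KB that actually changed
 */
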
/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}
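
/*
 * Usage sketch (illustrative, not part of the original file): the common
 * metadata-update pattern in buffer-based filesystems:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (bh) {
 *		modify_on_disk_structure(bh->b_data);	// hypothetical edit
 *		mark_buffer_dirty(bh);			// schedule writeout
 *		brelse(bh);
 *	}
 */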

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
	WARN_ON(1);
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (!list_empty(&bh->b_assoc_buffers)) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);
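
/*
 * Illustration (not part of the original file): lookup order for a cached
 * block, per the LRU comment above - the per-cpu array first, then the
 * pagecache:
 *
 *	bh = __find_get_block(bdev, 1000, 4096);
 *	// LRU hit:  bh moved to bhs[0], no page lookup at all
 *	// LRU miss: __find_get_block_slow() walks the page's buffer ring,
 *	//           then bh_lru_install() caches the result for this CPU
 */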

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
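
/*
 * Usage sketch (illustrative, not part of the original file): read-ahead
 * followed by a blocking read, each returned buffer balanced by brelse():
 *
 *	__breadahead(bdev, blocknr + 1, blocksize);	// hint the next block
 *	bh = __bread(bdev, blocknr, blocksize);
 *	if (!bh)
 *		return -EIO;			// the block was unreadable
 *	use_data(bh->b_data);			// hypothetical consumer
 *	brelse(bh);
 */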

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
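
/*
 * Usage sketch (illustrative, not part of the original file): the pattern
 * used by writepage paths in this file - attach buffers on demand before
 * working on them:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 *	head = page_buffers(page);
 */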

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);
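
/*
 * Usage sketch (illustrative, not part of the original file): the pattern
 * used later in this file when get_block() reports a newly allocated
 * block - kill any stale blockdev alias before writing through it:
 *
 *	if (buffer_new(bh))
 *		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
 */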

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
			lock_buffer(bh);
		} else if (test_set_buffer_locked(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
1709 if (buffer_async_write(bh)) {
1710 submit_bh(WRITE, bh);
1711 nr_underway++;
1712 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 bh = next;
1714 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001715 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716
1717 err = 0;
1718done:
1719 if (nr_underway == 0) {
1720 /*
1721 * The page was marked dirty, but the buffers were
1722 * clean. Someone wrote them back by hand with
1723 * ll_rw_block/submit_bh. A rare case.
1724 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 end_page_writeback(page);
Nick Piggin3d67f2d2007-05-06 14:49:05 -07001726
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 /*
1728 * The page and buffer_heads can be released at any time from
1729 * here on.
1730 */
1731 wbc->pages_skipped++; /* We didn't write this page */
1732 }
1733 return err;
1734
1735recover:
1736 /*
1737 * ENOSPC, or some other error. We may already have added some
1738 * blocks to the file, so we need to write these out to avoid
1739 * exposing stale data.
1740 * The page is currently locked and not marked for writeback
1741 */
1742 bh = head;
1743 /* Recovery: lock and submit the mapped buffers */
1744 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1746 lock_buffer(bh);
1747 mark_buffer_async_write(bh);
1748 } else {
1749 /*
1750 * The buffer may have been set dirty during
1751 * attachment to a dirty page.
1752 */
1753 clear_buffer_dirty(bh);
1754 }
1755 } while ((bh = bh->b_this_page) != head);
1756 SetPageError(page);
1757 BUG_ON(PageWriteback(page));
Andrew Morton7e4c3692007-05-08 00:23:27 -07001758 mapping_set_error(page->mapping, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 do {
1761 struct buffer_head *next = bh->b_this_page;
1762 if (buffer_async_write(bh)) {
1763 clear_buffer_dirty(bh);
1764 submit_bh(WRITE, bh);
1765 nr_underway++;
1766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 bh = next;
1768 } while (bh != head);
Nick Pigginffda9d32007-02-20 13:57:54 -08001769 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 goto done;
1771}
1772
Nick Pigginafddba42007-10-16 01:25:01 -07001773/*
1774 * If a page has any new buffers, zero them out here, and mark them uptodate
1775 * and dirty so they'll be written out (in order to prevent uninitialised
1776 * block data from leaking). And clear the new bit.
1777 */
1778void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1779{
1780 unsigned int block_start, block_end;
1781 struct buffer_head *head, *bh;
1782
1783 BUG_ON(!PageLocked(page));
1784 if (!page_has_buffers(page))
1785 return;
1786
1787 bh = head = page_buffers(page);
1788 block_start = 0;
1789 do {
1790 block_end = block_start + bh->b_size;
1791
1792 if (buffer_new(bh)) {
1793 if (block_end > from && block_start < to) {
1794 if (!PageUptodate(page)) {
1795 unsigned start, size;
1796
1797 start = max(from, block_start);
1798 size = min(to, block_end) - start;
1799
1800 zero_user_page(page, start, size, KM_USER0);
1801 set_buffer_uptodate(bh);
1802 }
1803
1804 clear_buffer_new(bh);
1805 mark_buffer_dirty(bh);
1806 }
1807 }
1808
1809 block_start = block_end;
1810 bh = bh->b_this_page;
1811 } while (bh != head);
1812}
1813EXPORT_SYMBOL(page_zero_new_buffers);
1814
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815static int __block_prepare_write(struct inode *inode, struct page *page,
1816 unsigned from, unsigned to, get_block_t *get_block)
1817{
1818 unsigned block_start, block_end;
1819 sector_t block;
1820 int err = 0;
1821 unsigned blocksize, bbits;
1822 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1823
1824 BUG_ON(!PageLocked(page));
1825 BUG_ON(from > PAGE_CACHE_SIZE);
1826 BUG_ON(to > PAGE_CACHE_SIZE);
1827 BUG_ON(from > to);
1828
1829 blocksize = 1 << inode->i_blkbits;
1830 if (!page_has_buffers(page))
1831 create_empty_buffers(page, blocksize, 0);
1832 head = page_buffers(page);
1833
1834 bbits = inode->i_blkbits;
1835 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1836
1837 for(bh = head, block_start = 0; bh != head || !block_start;
1838 block++, block_start=block_end, bh = bh->b_this_page) {
1839 block_end = block_start + blocksize;
1840 if (block_end <= from || block_start >= to) {
1841 if (PageUptodate(page)) {
1842 if (!buffer_uptodate(bh))
1843 set_buffer_uptodate(bh);
1844 }
1845 continue;
1846 }
1847 if (buffer_new(bh))
1848 clear_buffer_new(bh);
1849 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001850 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 err = get_block(inode, block, bh, 1);
1852 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001853 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 unmap_underlying_metadata(bh->b_bdev,
1856 bh->b_blocknr);
1857 if (PageUptodate(page)) {
Nick Piggin637aff42007-10-16 01:25:00 -07001858 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 set_buffer_uptodate(bh);
Nick Piggin637aff42007-10-16 01:25:00 -07001860 mark_buffer_dirty(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 continue;
1862 }
1863 if (block_end > to || block_start < from) {
1864 void *kaddr;
1865
1866 kaddr = kmap_atomic(page, KM_USER0);
1867 if (block_end > to)
1868 memset(kaddr+to, 0,
1869 block_end-to);
1870 if (block_start < from)
1871 memset(kaddr+block_start,
1872 0, from-block_start);
1873 flush_dcache_page(page);
1874 kunmap_atomic(kaddr, KM_USER0);
1875 }
1876 continue;
1877 }
1878 }
1879 if (PageUptodate(page)) {
1880 if (!buffer_uptodate(bh))
1881 set_buffer_uptodate(bh);
1882 continue;
1883 }
1884 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
David Chinner33a266d2007-02-12 00:51:41 -08001885 !buffer_unwritten(bh) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 (block_start < from || block_end > to)) {
1887 ll_rw_block(READ, 1, &bh);
1888 *wait_bh++=bh;
1889 }
1890 }
1891 /*
1892 * If we issued read requests - let them complete.
1893 */
1894 while(wait_bh > wait) {
1895 wait_on_buffer(*--wait_bh);
1896 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001897 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 }
Nick Pigginafddba42007-10-16 01:25:01 -07001899 if (unlikely(err))
1900 page_zero_new_buffers(page, from, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 return err;
1902}
1903
1904static int __block_commit_write(struct inode *inode, struct page *page,
1905 unsigned from, unsigned to)
1906{
1907 unsigned block_start, block_end;
1908 int partial = 0;
1909 unsigned blocksize;
1910 struct buffer_head *bh, *head;
1911
1912 blocksize = 1 << inode->i_blkbits;
1913
1914 for(bh = head = page_buffers(page), block_start = 0;
1915 bh != head || !block_start;
1916 block_start=block_end, bh = bh->b_this_page) {
1917 block_end = block_start + blocksize;
1918 if (block_end <= from || block_start >= to) {
1919 if (!buffer_uptodate(bh))
1920 partial = 1;
1921 } else {
1922 set_buffer_uptodate(bh);
1923 mark_buffer_dirty(bh);
1924 }
Nick Pigginafddba42007-10-16 01:25:01 -07001925 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 }
1927
1928 /*
1929 * If this is a partial write which happened to make all buffers
1930 * uptodate then we can optimize away a bogus readpage() for
1931 * the next read(). Here we 'discover' whether the page went
1932 * uptodate as a result of this (potentially partial) write.
1933 */
1934 if (!partial)
1935 SetPageUptodate(page);
1936 return 0;
1937}
1938
1939/*
Nick Pigginafddba42007-10-16 01:25:01 -07001940 * block_write_begin takes care of the basic task of block allocation and
1941 * bringing partial write blocks uptodate first.
1942 *
1943 * If *pagep is not NULL, then block_write_begin uses the locked page
1944 * at *pagep rather than allocating its own. In this case, the page will
1945 * not be unlocked or deallocated on failure.
1946 */
1947int block_write_begin(struct file *file, struct address_space *mapping,
1948 loff_t pos, unsigned len, unsigned flags,
1949 struct page **pagep, void **fsdata,
1950 get_block_t *get_block)
1951{
1952 struct inode *inode = mapping->host;
1953 int status = 0;
1954 struct page *page;
1955 pgoff_t index;
1956 unsigned start, end;
1957 int ownpage = 0;
1958
1959 index = pos >> PAGE_CACHE_SHIFT;
1960 start = pos & (PAGE_CACHE_SIZE - 1);
1961 end = start + len;
1962
1963 page = *pagep;
1964 if (page == NULL) {
1965 ownpage = 1;
1966 page = __grab_cache_page(mapping, index);
1967 if (!page) {
1968 status = -ENOMEM;
1969 goto out;
1970 }
1971 *pagep = page;
1972 } else
1973 BUG_ON(!PageLocked(page));
1974
1975 status = __block_prepare_write(inode, page, start, end, get_block);
1976 if (unlikely(status)) {
1977 ClearPageUptodate(page);
1978
1979 if (ownpage) {
1980 unlock_page(page);
1981 page_cache_release(page);
1982 *pagep = NULL;
1983
1984 /*
1985 * prepare_write() may have instantiated a few blocks
1986 * outside i_size. Trim these off again. Don't need
1987 * i_size_read because we hold i_mutex.
1988 */
1989 if (pos + len > inode->i_size)
1990 vmtruncate(inode, inode->i_size);
1991 }
1992 goto out;
1993 }
1994
1995out:
1996 return status;
1997}
1998EXPORT_SYMBOL(block_write_begin);
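
/*
 * Example (a minimal sketch under the usual assumptions; my_get_block is
 * a hypothetical filesystem block mapper, not a real symbol): a typical
 * ->write_begin simply clears *pagep so block_write_begin allocates and
 * locks the page itself, then passes its get_block down.  This is the
 * same calling convention cont_write_begin() below uses.
 *
 *	static int my_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					 pagep, fsdata, my_get_block);
 *	}
 */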

int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start;

	start = pos & (PAGE_CACHE_SIZE - 1);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so we
		 * don't have to worry about a readpage reading them and
		 * overwriting a partial write.  However if we have encountered
		 * a short write and only partially written into a buffer, it
		 * will not be marked uptodate, so a readpage might come in and
		 * destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate page as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!PageUptodate(page))
			copied = 0;

		page_zero_new_buffers(page, start+copied, start+len);
	}
	flush_dcache_page(page);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(inode, page, start, start+copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);

int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	return copied;
}
EXPORT_SYMBOL(generic_write_end);
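
/*
 * Example (a hedged sketch; the "my_" names are hypothetical): filesystems
 * that use the helpers above usually pair a thin ->write_begin wrapper
 * with generic_write_end directly in their address_space_operations.
 *
 *	static const struct address_space_operations my_aops = {
 *		.readpage	= my_readpage,
 *		.writepage	= my_writepage,
 *		.write_begin	= my_write_begin,
 *		.write_end	= generic_write_end,
 *		.bmap		= my_bmap,
 *	};
 */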

/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality.  This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int nr, i;
	int fully_mapped = 1;

	BUG_ON(!PageLocked(page));
	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				if (err)
					SetPageError(page);
			}
			if (!buffer_mapped(bh)) {
				zero_user_page(page, i * blocksize, blocksize,
						KM_USER0);
				if (!err)
					set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		SetPageMappedToDisk(page);

	if (!nr) {
		/*
		 * All buffers are uptodate - we can set the page uptodate
		 * as well.  But not if get_block() returned an error.
		 */
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(READ, bh);
	}
	return 0;
}
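
/*
 * Example (a minimal sketch; my_get_block is hypothetical): wiring a
 * filesystem's ->readpage to the generic helper is normally a one-liner.
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, my_get_block);
 *	}
 */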

/*
 * Utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	unsigned long limit;
	int err;

	err = -EFBIG;
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
		goto out;
	}
	if (size > inode->i_sb->s_maxbytes)
		goto out;

	err = pagecache_write_begin(NULL, mapping, size, 0,
				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
				&page, &fsdata);
	if (err)
		goto out;

	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
	BUG_ON(err > 0);

out:
	return err;
}

int generic_cont_expand(struct inode *inode, loff_t size)
{
	unsigned int offset;

	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */

	/*
	 * Ugh.  In prepare/commit_write, if from==to==start of block, we
	 * skip the prepare.  Make sure we never send an offset for the start
	 * of a block.
	 * XXX: actually, this should be handled in those filesystems by
	 * checking for the AOP_FLAG_CONT_EXPAND flag.
	 */
	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
		/* caller must handle this extra byte. */
		size++;
	}
	return generic_cont_expand_simple(inode, size);
}

int cont_expand_zero(struct file *file, struct address_space *mapping,
			loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	struct page *page;
	void *fsdata;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_CACHE_SIZE - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user_page(page, zerofrom, len, KM_USER0);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		/* if we expand the file, the last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user_page(page, zerofrom, len, KM_USER0);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}

/*
 * For moronic filesystems that do not allow holes in files.
 * We may have to extend the file.
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	unsigned zerofrom;
	int err;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		goto out;

	zerofrom = *bytes & ~PAGE_CACHE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len,
				flags, pagep, fsdata, get_block);
out:
	return err;
}
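
/*
 * Example (a hedged sketch; my_get_block, MY_I and zeroed_size are all
 * hypothetical names): a hole-less filesystem keeps a "zeroed up to here"
 * watermark, typically in its per-inode private info, and hands its
 * address to cont_write_begin so the gap between the old EOF and the
 * write position gets zero-filled first.
 *
 *	static int my_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, my_get_block,
 *					&MY_I(mapping->host)->zeroed_size);
 *	}
 */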

int block_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	int err = __block_prepare_write(inode, page, from, to, get_block);
	if (err)
		ClearPageUptodate(page);
	return err;
}

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode, page, from, to);
	return 0;
}

int generic_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode, page, from, to);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}

/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied.  Hence we must
 * be careful to check for EOF conditions here.  We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF.  If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int
block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
		   get_block_t get_block)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	unsigned long end;
	loff_t size;
	int ret = -EINVAL;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
		end = size & ~PAGE_CACHE_MASK;
	else
		end = PAGE_CACHE_SIZE;

	ret = block_prepare_write(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);

out_unlock:
	unlock_page(page);
	return ret;
}
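
/*
 * Example (a minimal sketch; my_get_block and the my_* names are
 * hypothetical): a filesystem exposes block_page_mkwrite() through its
 * vm_operations so that mmap writes into holes get block allocation and
 * ENOSPC reporting at fault time rather than silently at writeout.
 *
 *	static int my_page_mkwrite(struct vm_area_struct *vma,
 *				   struct page *page)
 *	{
 *		return block_page_mkwrite(vma, page, my_get_block);
 *	}
 *
 *	static struct vm_operations_struct my_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= my_page_mkwrite,
 *	};
 */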

/*
 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}

/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 */
int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	char *kaddr;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	if (page_has_buffers(page))
		return block_prepare_write(page, from, to, get_block);

	if (PageMappedToDisk(page))
		return 0;

	/*
	 * Allocate buffers so that we can keep track of state, and potentially
	 * attach them to the page if an error occurs.  In the common case of
	 * no error, they will just be freed again without ever being attached
	 * to the page (which is all OK, because we're under the page lock).
	 *
	 * Be careful: the buffer linked list is a NULL terminated one, rather
	 * than the circular one we're used to.
	 */
	head = alloc_page_buffers(page, blocksize, 0);
	if (!head)
		return -ENOMEM;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			kaddr = kmap_atomic(page, KM_USER0);
			if (block_start < from)
				memset(kaddr+block_start, 0, from-block_start);
			if (block_end > to)
				memset(kaddr + to, 0, block_end - to);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	do {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	} while (head);

	return 0;

failed:
	/*
	 * Error recovery is a bit difficult.  We need to zero out blocks that
	 * were newly allocated, and dirty them to ensure they get written out.
	 * Buffers need to be attached to the page at this point, otherwise
	 * the handling of potential IO errors during writeout would be hard
	 * (could try doing synchronous writeout, but what if that fails too?)
	 */
	spin_lock(&page->mapping->private_lock);
	bh = head;
	block_start = 0;
	do {
		if (PageUptodate(page))
			set_buffer_uptodate(bh);
		if (PageDirty(page))
			set_buffer_dirty(bh);

		block_end = block_start+blocksize;
		if (block_end <= from)
			goto next;
		if (block_start >= to)
			goto next;

		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				zero_user_page(page, block_start, bh->b_size, KM_USER0);
				set_buffer_uptodate(bh);
			}
			mark_buffer_dirty(bh);
		}
next:
		block_start = block_end;
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);

	return ret;
}
EXPORT_SYMBOL(nobh_prepare_write);

/*
 * Make sure any changes to nobh_commit_write() are reflected in
 * nobh_truncate_page(), since it doesn't call commit_write().
 */
int nobh_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	if (page_has_buffers(page))
		return generic_commit_write(file, page, from, to);

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(nobh_commit_write);

/*
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);

/*
 * This function assumes that ->prepare_write() uses nobh_prepare_write().
 */
int nobh_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned to;
	struct page *page;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int ret = 0;

	if ((offset & (blocksize - 1)) == 0)
		goto out;

	ret = -ENOMEM;
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;

	to = (offset + blocksize) & ~(blocksize - 1);
	ret = a_ops->prepare_write(NULL, page, offset, to);
	if (ret == 0) {
		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
				KM_USER0);
		/*
		 * It would be more correct to call aops->commit_write()
		 * here, but this is more efficient.
		 */
		SetPageUptodate(page);
		set_page_dirty(page);
	}
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
EXPORT_SYMBOL(nobh_truncate_page);

int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped.  Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh.  Read error.  Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user_page(page, offset, length, KM_USER0);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
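
/*
 * Example (a hedged sketch; my_get_block and my_free_blocks are
 * hypothetical): a filesystem's truncate path zeroes the partial block at
 * the new EOF with block_truncate_page() before freeing the blocks beyond
 * it, so stale data in the final block never becomes visible again.
 *
 *	static void my_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *				    my_get_block);
 *		my_free_blocks(inode, inode->i_size);
 *	}
 */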

/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
	return __block_write_full_page(inode, page, get_block, wbc);
}
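
/*
 * Example (a minimal sketch; my_get_block is hypothetical): the matching
 * ->writepage wrapper is as thin as the ->readpage one above.
 *
 *	static int my_writepage(struct page *page,
 *				struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, my_get_block, wbc);
 *	}
 */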

sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
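
/*
 * Example (a minimal sketch; my_get_block is hypothetical): ->bmap, used
 * for example by the FIBMAP ioctl, is another thin wrapper.  Note that
 * get_block is called with create == 0 here, so a hole maps to block 0.
 *
 *	static sector_t my_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, my_get_block);
 *	}
 */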

static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		set_bit(BH_Eopnotsupp, &bh->b_state);
	}

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
}

int submit_bh(int rw, struct buffer_head *bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	if (buffer_ordered(bh) && (rw == WRITE))
		rw = WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting.  Should this
	 * include WRITE_SYNC as well?
	 */
	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
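
/*
 * Example (a hedged sketch of the classic pattern; my_read_bh_sync is
 * hypothetical): a caller that wants one metadata block read synchronously
 * locks the buffer, sets a completion handler, submits, and waits.  This
 * mirrors what ll_rw_block() below does for a single buffer.
 *
 *	static int my_read_bh_sync(struct buffer_head *bh)
 *	{
 *		lock_buffer(bh);
 *		if (buffer_uptodate(bh)) {
 *			unlock_buffer(bh);
 *			return 0;
 *		}
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *		return buffer_uptodate(bh) ? 0 : -EIO;
 *	}
 */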

/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * option, %SWRITE, is like %WRITE except that we make sure the *current*
 * data in the buffers is sent to disk.  The fourth option, %READA, is
 * described in the documentation for generic_make_request(), which
 * ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
 * clean when doing a write request, and any buffer that appears to be
 * up-to-date when doing a read request.  Further, it marks as clean any
 * buffers that are processed for writing (the buffer cache won't assume
 * that they are actually clean until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (rw == SWRITE)
			lock_buffer(bh);
		else if (test_set_buffer_locked(bh))
			continue;

		if (rw == WRITE || rw == SWRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}

/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref on
 * the buffer_head.
 */
int sync_dirty_buffer(struct buffer_head *bh)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		if (buffer_eopnotsupp(bh)) {
			clear_buffer_eopnotsupp(bh);
			ret = -EOPNOTSUPP;
		}
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
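
/*
 * Example (a hedged sketch of the common pattern; struct my_sb_info and
 * s_sbh are hypothetical): a filesystem that has modified the in-memory
 * copy of its superblock block flushes it with sync_dirty_buffer() during
 * a sync or unmount.  s_sbh stands in for whatever buffer_head the
 * filesystem keeps pinned for its on-disk superblock.
 *
 *	static int my_sync_super(struct my_sb_info *sbi)
 *	{
 *		mark_buffer_dirty(sbi->s_sbh);
 *		return sync_dirty_buffer(sbi->s_sbh);
 *	}
 */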

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (!list_empty(&bh->b_assoc_buffers))
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);

void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}

/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = KMEM_CACHE(buffer_head,
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}

EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_page_mkwrite);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_write_begin);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand);
EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);