1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/config.h>
22#include <linux/kernel.h>
23#include <linux/syscalls.h>
24#include <linux/fs.h>
25#include <linux/mm.h>
26#include <linux/percpu.h>
27#include <linux/slab.h>
28#include <linux/smp_lock.h>
29#include <linux/capability.h>
30#include <linux/blkdev.h>
31#include <linux/file.h>
32#include <linux/quotaops.h>
33#include <linux/highmem.h>
34#include <linux/module.h>
35#include <linux/writeback.h>
36#include <linux/hash.h>
37#include <linux/suspend.h>
38#include <linux/buffer_head.h>
39#include <linux/bio.h>
40#include <linux/notifier.h>
41#include <linux/cpu.h>
42#include <linux/bitops.h>
43#include <linux/mpage.h>
44#include <linux/bit_spinlock.h>
45
46static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47static void invalidate_bh_lrus(void);
48
49#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50
51inline void
52init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53{
54 bh->b_end_io = handler;
55 bh->b_private = private;
56}
57
58static int sync_buffer(void *word)
59{
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
63
64 smp_mb();
65 bd = bh->b_bdev;
66 if (bd)
67 blk_run_address_space(bd->bd_inode->i_mapping);
68 io_schedule();
69 return 0;
70}
71
72void fastcall __lock_buffer(struct buffer_head *bh)
73{
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
76}
77EXPORT_SYMBOL(__lock_buffer);
78
79void fastcall unlock_buffer(struct buffer_head *bh)
80{
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
84}
85
86/*
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
90 */
91void __wait_on_buffer(struct buffer_head * bh)
92{
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94}
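/*
 * A typical locked-update sequence built on these helpers (an
 * illustrative sketch, not a new interface) is:
 *
 *	lock_buffer(bh);	 take BH_Lock, sleeping if necessary
 *	...modify bh->b_data...
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);	 clear BH_Lock and wake any waiters
 *
 * Callers which only need to wait for I/O already in flight, and do not
 * intend to modify the buffer, use wait_on_buffer() instead.
 */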
95
96static void
97__clear_page_buffers(struct page *page)
98{
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
102}
103
104static void buffer_io_error(struct buffer_head *bh)
105{
106 char b[BDEVNAME_SIZE];
107
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
111}
112
113/*
114 * Default synchronous end-of-IO handler. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
116 */
117void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
118{
119 if (uptodate) {
120 set_buffer_uptodate(bh);
121 } else {
122 /* This happens, due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
124 }
125 unlock_buffer(bh);
126 put_bh(bh);
127}
128
129void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130{
131 char b[BDEVNAME_SIZE];
132
133 if (uptodate) {
134 set_buffer_uptodate(bh);
135 } else {
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 buffer_io_error(bh);
138 printk(KERN_WARNING "lost page write due to "
139 "I/O error on %s\n",
140 bdevname(bh->b_bdev, b));
141 }
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
144 }
145 unlock_buffer(bh);
146 put_bh(bh);
147}
148
149/*
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
152 */
153int sync_blockdev(struct block_device *bdev)
154{
155 int ret = 0;
156
157 if (bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
159 return ret;
160}
161EXPORT_SYMBOL(sync_blockdev);
162
163/*
164 * Write out and wait upon all dirty data associated with this
165 * superblock. Filesystem data as well as the underlying block
166 * device. Takes the superblock lock.
167 */
168int fsync_super(struct super_block *sb)
169{
170 sync_inodes_sb(sb, 0);
171 DQUOT_SYNC(sb);
172 lock_super(sb);
173 if (sb->s_dirt && sb->s_op->write_super)
174 sb->s_op->write_super(sb);
175 unlock_super(sb);
176 if (sb->s_op->sync_fs)
177 sb->s_op->sync_fs(sb, 1);
178 sync_blockdev(sb->s_bdev);
179 sync_inodes_sb(sb, 1);
180
181 return sync_blockdev(sb->s_bdev);
182}
183
184/*
185 * Write out and wait upon all dirty data associated with this
186 * device. Filesystem data as well as the underlying block
187 * device. Takes the superblock lock.
188 */
189int fsync_bdev(struct block_device *bdev)
190{
191 struct super_block *sb = get_super(bdev);
192 if (sb) {
193 int res = fsync_super(sb);
194 drop_super(sb);
195 return res;
196 }
197 return sync_blockdev(bdev);
198}
199
200/**
201 * freeze_bdev -- lock a filesystem and force it into a consistent state
202 * @bdev: blockdevice to lock
203 *
204 * This takes the block device bd_mount_sem to make sure no new mounts
205 * happen on bdev until thaw_bdev() is called.
206 * If a superblock is found on this device, we take the s_umount semaphore
207 * on it to make sure nobody unmounts until the snapshot creation is done.
208 */
209struct super_block *freeze_bdev(struct block_device *bdev)
210{
211 struct super_block *sb;
212
213 down(&bdev->bd_mount_sem);
214 sb = get_super(bdev);
215 if (sb && !(sb->s_flags & MS_RDONLY)) {
216 sb->s_frozen = SB_FREEZE_WRITE;
217 smp_wmb();
218
219 sync_inodes_sb(sb, 0);
220 DQUOT_SYNC(sb);
221
222 lock_super(sb);
223 if (sb->s_dirt && sb->s_op->write_super)
224 sb->s_op->write_super(sb);
225 unlock_super(sb);
226
227 if (sb->s_op->sync_fs)
228 sb->s_op->sync_fs(sb, 1);
229
230 sync_blockdev(sb->s_bdev);
231 sync_inodes_sb(sb, 1);
232
233 sb->s_frozen = SB_FREEZE_TRANS;
234 smp_wmb();
235
236 sync_blockdev(sb->s_bdev);
237
238 if (sb->s_op->write_super_lockfs)
239 sb->s_op->write_super_lockfs(sb);
240 }
241
242 sync_blockdev(bdev);
243 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
244}
245EXPORT_SYMBOL(freeze_bdev);
246
247/**
248 * thaw_bdev -- unlock filesystem
249 * @bdev: blockdevice to unlock
250 * @sb: associated superblock
251 *
252 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
253 */
254void thaw_bdev(struct block_device *bdev, struct super_block *sb)
255{
256 if (sb) {
257 BUG_ON(sb->s_bdev != bdev);
258
259 if (sb->s_op->unlockfs)
260 sb->s_op->unlockfs(sb);
261 sb->s_frozen = SB_UNFROZEN;
262 smp_wmb();
263 wake_up(&sb->s_wait_unfrozen);
264 drop_super(sb);
265 }
266
267 up(&bdev->bd_mount_sem);
268}
269EXPORT_SYMBOL(thaw_bdev);
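/*
 * Illustrative use of the freeze/thaw pair (a sketch of how a snapshot
 * or backup implementation might quiesce a device, not a requirement):
 *
 *	sb = freeze_bdev(bdev);
 *	...take the snapshot / copy the device...
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() may return NULL if no filesystem is mounted on the
 * device; thaw_bdev() handles that case and just releases bd_mount_sem.
 */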
270
271/*
272 * sync everything. Start out by waking pdflush, because that writes back
273 * all queues in parallel.
274 */
275static void do_sync(unsigned long wait)
276{
277 wakeup_pdflush(0);
278 sync_inodes(0); /* All mappings, inodes and their blockdevs */
279 DQUOT_SYNC(NULL);
280 sync_supers(); /* Write the superblocks */
281 sync_filesystems(0); /* Start syncing the filesystems */
282 sync_filesystems(wait); /* Waitingly sync the filesystems */
283 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
284 if (!wait)
285 printk("Emergency Sync complete\n");
286 if (unlikely(laptop_mode))
287 laptop_sync_completion();
288}
289
290asmlinkage long sys_sync(void)
291{
292 do_sync(1);
293 return 0;
294}
295
296void emergency_sync(void)
297{
298 pdflush_operation(do_sync, 0);
299}
300
301/*
302 * Generic function to fsync a file.
303 *
304 * filp may be NULL if called via the msync of a vma.
305 */
306
307int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
308{
309 struct inode * inode = dentry->d_inode;
310 struct super_block * sb;
311 int ret, err;
312
313 /* sync the inode to buffers */
314 ret = write_inode_now(inode, 0);
315
316 /* sync the superblock to buffers */
317 sb = inode->i_sb;
318 lock_super(sb);
319 if (sb->s_op->write_super)
320 sb->s_op->write_super(sb);
321 unlock_super(sb);
322
323 /* .. finally sync the buffers to disk */
324 err = sync_blockdev(sb->s_bdev);
325 if (!ret)
326 ret = err;
327 return ret;
328}
329
330static long do_fsync(unsigned int fd, int datasync)
331{
332 struct file * file;
333 struct address_space *mapping;
334 int ret, err;
335
336 ret = -EBADF;
337 file = fget(fd);
338 if (!file)
339 goto out;
340
341 ret = -EINVAL;
342 if (!file->f_op || !file->f_op->fsync) {
343 /* Why? We can still call filemap_fdatawrite */
344 goto out_putf;
345 }
346
347 mapping = file->f_mapping;
348
349 current->flags |= PF_SYNCWRITE;
350 ret = filemap_fdatawrite(mapping);
351
352 /*
353 * We need to protect against concurrent writers,
354 * which could cause livelocks in fsync_buffers_list
355 */
356 mutex_lock(&mapping->host->i_mutex);
357 err = file->f_op->fsync(file, file->f_dentry, datasync);
358 if (!ret)
359 ret = err;
360 mutex_unlock(&mapping->host->i_mutex);
361 err = filemap_fdatawait(mapping);
362 if (!ret)
363 ret = err;
364 current->flags &= ~PF_SYNCWRITE;
365
366out_putf:
367 fput(file);
368out:
369 return ret;
370}
371
372asmlinkage long sys_fsync(unsigned int fd)
373{
374 return do_fsync(fd, 0);
375}
376
377asmlinkage long sys_fdatasync(unsigned int fd)
378{
379 return do_fsync(fd, 1);
380}
381
382/*
383 * Various filesystems appear to want __find_get_block to be non-blocking.
384 * But it's the page lock which protects the buffers. To get around this,
385 * we get exclusion from try_to_free_buffers with the blockdev mapping's
386 * private_lock.
387 *
388 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
389 * may be quite high. This code could TryLock the page, and if that
390 * succeeds, there is no need to take private_lock. (But if
391 * private_lock is contended then so is mapping->tree_lock).
392 */
393static struct buffer_head *
394__find_get_block_slow(struct block_device *bdev, sector_t block)
395{
396 struct inode *bd_inode = bdev->bd_inode;
397 struct address_space *bd_mapping = bd_inode->i_mapping;
398 struct buffer_head *ret = NULL;
399 pgoff_t index;
400 struct buffer_head *bh;
401 struct buffer_head *head;
402 struct page *page;
403 int all_mapped = 1;
404
405 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
406 page = find_get_page(bd_mapping, index);
407 if (!page)
408 goto out;
409
410 spin_lock(&bd_mapping->private_lock);
411 if (!page_has_buffers(page))
412 goto out_unlock;
413 head = page_buffers(page);
414 bh = head;
415 do {
416 if (bh->b_blocknr == block) {
417 ret = bh;
418 get_bh(bh);
419 goto out_unlock;
420 }
421 if (!buffer_mapped(bh))
422 all_mapped = 0;
423 bh = bh->b_this_page;
424 } while (bh != head);
425
426 /* we might be here because some of the buffers on this page are
427 * not mapped. This is due to various races between
428 * file io on the block device and getblk. It gets dealt with
429 * elsewhere, don't buffer_error if we had some unmapped buffers
430 */
431 if (all_mapped) {
432 printk("__find_get_block_slow() failed. "
433 "block=%llu, b_blocknr=%llu\n",
434 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
435 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
436 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
437 }
438out_unlock:
439 spin_unlock(&bd_mapping->private_lock);
440 page_cache_release(page);
441out:
442 return ret;
443}
444
445/* If invalidate_buffers() will trash dirty buffers, it means some kind
446 of fs corruption is going on. Trashing dirty data always imply losing
447 information that was supposed to be just stored on the physical layer
448 by the user.
449
450 Thus invalidate_buffers in general usage is not allowed to trash
451 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
452 be preserved. These buffers are simply skipped.
453
454 We also skip buffers which are still in use. For example this can
455 happen if a userspace program is reading the block device.
456
457 NOTE: In the case where the user removed a removable-media disk even though
458 there is still dirty data not synced on disk (due to a bug in the device driver
459 or to an error of the user), by not destroying the dirty buffers we could
460 generate corruption also on the next media inserted; thus a parameter is
461 necessary to handle this case in the safest way possible (trying
462 not to also corrupt the new disk inserted with the data belonging to
463 the old, now corrupted, disk). Also for the ramdisk the natural thing
464 to do in order to release the ramdisk memory is to destroy dirty buffers.
465
466 These are two special cases. Normal usage implies that the device driver
467 issues a sync on the device (without waiting for I/O completion) and
468 then an invalidate_buffers call that doesn't trash dirty buffers.
469
470 For handling cache coherency with the blkdev pagecache the 'update' case
471 has been introduced. It is needed to re-read from disk any pinned
472 buffer. NOTE: re-reading from disk is destructive so we can do it only
473 when we assume nobody is changing the buffercache under our I/O and when
474 we think the disk contains more recent information than the buffercache.
475 The update == 1 pass marks the buffers we need to update, the update == 2
476 pass does the actual I/O. */
477void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
478{
479 invalidate_bh_lrus();
480 /*
481 * FIXME: what about destroy_dirty_buffers?
482 * We really want to use invalidate_inode_pages2() for
483 * that, but not until that's cleaned up.
484 */
485 invalidate_inode_pages(bdev->bd_inode->i_mapping);
486}
487
488/*
489 * Kick pdflush then try to free up some ZONE_NORMAL memory.
490 */
491static void free_more_memory(void)
492{
493 struct zone **zones;
494 pg_data_t *pgdat;
495
496 wakeup_pdflush(1024);
497 yield();
498
499 for_each_pgdat(pgdat) {
500 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
501 if (*zones)
502 try_to_free_pages(zones, GFP_NOFS);
503 }
504}
505
506/*
507 * I/O completion handler for block_read_full_page() - pages
508 * which come unlocked at the end of I/O.
509 */
510static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
511{
512 unsigned long flags;
513 struct buffer_head *first;
514 struct buffer_head *tmp;
515 struct page *page;
516 int page_uptodate = 1;
517
518 BUG_ON(!buffer_async_read(bh));
519
520 page = bh->b_page;
521 if (uptodate) {
522 set_buffer_uptodate(bh);
523 } else {
524 clear_buffer_uptodate(bh);
525 if (printk_ratelimit())
526 buffer_io_error(bh);
527 SetPageError(page);
528 }
529
530 /*
531 * Be _very_ careful from here on. Bad things can happen if
532 * two buffer heads end IO at almost the same time and both
533 * decide that the page is now completely done.
534 */
535 first = page_buffers(page);
536 local_irq_save(flags);
537 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
538 clear_buffer_async_read(bh);
539 unlock_buffer(bh);
540 tmp = bh;
541 do {
542 if (!buffer_uptodate(tmp))
543 page_uptodate = 0;
544 if (buffer_async_read(tmp)) {
545 BUG_ON(!buffer_locked(tmp));
546 goto still_busy;
547 }
548 tmp = tmp->b_this_page;
549 } while (tmp != bh);
550 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
551 local_irq_restore(flags);
552
553 /*
554 * If none of the buffers had errors and they are all
555 * uptodate then we can set the page uptodate.
556 */
557 if (page_uptodate && !PageError(page))
558 SetPageUptodate(page);
559 unlock_page(page);
560 return;
561
562still_busy:
563 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
564 local_irq_restore(flags);
565 return;
566}
567
568/*
569 * Completion handler for block_write_full_page() - pages which are unlocked
570 * during I/O, and which have PageWriteback cleared upon I/O completion.
571 */
572void end_buffer_async_write(struct buffer_head *bh, int uptodate)
573{
574 char b[BDEVNAME_SIZE];
575 unsigned long flags;
576 struct buffer_head *first;
577 struct buffer_head *tmp;
578 struct page *page;
579
580 BUG_ON(!buffer_async_write(bh));
581
582 page = bh->b_page;
583 if (uptodate) {
584 set_buffer_uptodate(bh);
585 } else {
586 if (printk_ratelimit()) {
587 buffer_io_error(bh);
588 printk(KERN_WARNING "lost page write due to "
589 "I/O error on %s\n",
590 bdevname(bh->b_bdev, b));
591 }
592 set_bit(AS_EIO, &page->mapping->flags);
593 clear_buffer_uptodate(bh);
594 SetPageError(page);
595 }
596
597 first = page_buffers(page);
598 local_irq_save(flags);
599 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
600
601 clear_buffer_async_write(bh);
602 unlock_buffer(bh);
603 tmp = bh->b_this_page;
604 while (tmp != bh) {
605 if (buffer_async_write(tmp)) {
606 BUG_ON(!buffer_locked(tmp));
607 goto still_busy;
608 }
609 tmp = tmp->b_this_page;
610 }
611 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
612 local_irq_restore(flags);
613 end_page_writeback(page);
614 return;
615
616still_busy:
617 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
618 local_irq_restore(flags);
619 return;
620}
621
622/*
623 * If a page's buffers are under async readin (end_buffer_async_read
624 * completion) then there is a possibility that another thread of
625 * control could lock one of the buffers after it has completed
626 * but while some of the other buffers have not completed. This
627 * locked buffer would confuse end_buffer_async_read() into not unlocking
628 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
629 * that this buffer is not under async I/O.
630 *
631 * The page comes unlocked when it has no locked buffer_async buffers
632 * left.
633 *
634 * PageLocked prevents anyone starting new async I/O reads any of
635 * the buffers.
636 *
637 * PageWriteback is used to prevent simultaneous writeout of the same
638 * page.
639 *
640 * PageLocked prevents anyone from starting writeback of a page which is
641 * under read I/O (PageWriteback is only ever set against a locked page).
642 */
643static void mark_buffer_async_read(struct buffer_head *bh)
644{
645 bh->b_end_io = end_buffer_async_read;
646 set_buffer_async_read(bh);
647}
648
649void mark_buffer_async_write(struct buffer_head *bh)
650{
651 bh->b_end_io = end_buffer_async_write;
652 set_buffer_async_write(bh);
653}
654EXPORT_SYMBOL(mark_buffer_async_write);
655
656
657/*
658 * fs/buffer.c contains helper functions for buffer-backed address space's
659 * fsync functions. A common requirement for buffer-based filesystems is
660 * that certain data from the backing blockdev needs to be written out for
661 * a successful fsync(). For example, ext2 indirect blocks need to be
662 * written back and waited upon before fsync() returns.
663 *
664 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
665 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
666 * management of a list of dependent buffers at ->i_mapping->private_list.
667 *
668 * Locking is a little subtle: try_to_free_buffers() will remove buffers
669 * from their controlling inode's queue when they are being freed. But
670 * try_to_free_buffers() will be operating against the *blockdev* mapping
671 * at the time, not against the S_ISREG file which depends on those buffers.
672 * So the locking for private_list is via the private_lock in the address_space
673 * which backs the buffers. Which is different from the address_space
674 * against which the buffers are listed. So for a particular address_space,
675 * mapping->private_lock does *not* protect mapping->private_list! In fact,
676 * mapping->private_list will always be protected by the backing blockdev's
677 * ->private_lock.
678 *
679 * Which introduces a requirement: all buffers on an address_space's
680 * ->private_list must be from the same address_space: the blockdev's.
681 *
682 * address_spaces which do not place buffers at ->private_list via these
683 * utility functions are free to use private_lock and private_list for
684 * whatever they want. The only requirement is that list_empty(private_list)
685 * be true at clear_inode() time.
686 *
687 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
688 * filesystems should do that. invalidate_inode_buffers() should just go
689 * BUG_ON(!list_empty).
690 *
691 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
692 * take an address_space, not an inode. And it should be called
693 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
694 * queued up.
695 *
696 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
697 * list if it is already on a list. Because if the buffer is on a list,
698 * it *must* already be on the right one. If not, the filesystem is being
699 * silly. This will save a ton of locking. But first we have to ensure
700 * that buffers are taken *off* the old inode's list when they are freed
701 * (presumably in truncate). That requires careful auditing of all
702 * filesystems (do it inside bforget()). It could also be done by bringing
703 * b_inode back.
704 */
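/*
 * As an illustrative sketch (not a prescription), a filesystem which
 * dirties an indirect/metadata block on behalf of an inode would do:
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * and later, from its fsync method:
 *
 *	sync_mapping_buffers(inode->i_mapping);
 *
 * so that the dependent blockdev buffers are written and waited upon
 * before fsync() returns.
 */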
705
706/*
707 * The buffer's backing address_space's private_lock must be held
708 */
709static inline void __remove_assoc_queue(struct buffer_head *bh)
710{
711 list_del_init(&bh->b_assoc_buffers);
712}
713
714int inode_has_buffers(struct inode *inode)
715{
716 return !list_empty(&inode->i_data.private_list);
717}
718
719/*
720 * osync is designed to support O_SYNC io. It waits synchronously for
721 * all already-submitted IO to complete, but does not queue any new
722 * writes to the disk.
723 *
724 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
725 * you dirty the buffers, and then use osync_inode_buffers to wait for
726 * completion. Any other dirty buffers which are not yet queued for
727 * write will not be flushed to disk by the osync.
728 */
729static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
730{
731 struct buffer_head *bh;
732 struct list_head *p;
733 int err = 0;
734
735 spin_lock(lock);
736repeat:
737 list_for_each_prev(p, list) {
738 bh = BH_ENTRY(p);
739 if (buffer_locked(bh)) {
740 get_bh(bh);
741 spin_unlock(lock);
742 wait_on_buffer(bh);
743 if (!buffer_uptodate(bh))
744 err = -EIO;
745 brelse(bh);
746 spin_lock(lock);
747 goto repeat;
748 }
749 }
750 spin_unlock(lock);
751 return err;
752}
753
754/**
755 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
756 * buffers
757 * @mapping: the mapping which wants those buffers written
758 *
759 * Starts I/O against the buffers at mapping->private_list, and waits upon
760 * that I/O.
761 *
762 * Basically, this is a convenience function for fsync().
763 * @mapping is a file or directory which needs those buffers to be written for
764 * a successful fsync().
765 */
766int sync_mapping_buffers(struct address_space *mapping)
767{
768 struct address_space *buffer_mapping = mapping->assoc_mapping;
769
770 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
771 return 0;
772
773 return fsync_buffers_list(&buffer_mapping->private_lock,
774 &mapping->private_list);
775}
776EXPORT_SYMBOL(sync_mapping_buffers);
777
778/*
779 * Called when we've recently written block `bblock', and it is known that
780 * `bblock' was for a buffer_boundary() buffer. This means that the block at
781 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
782 * dirty, schedule it for IO. So that indirects merge nicely with their data.
783 */
784void write_boundary_block(struct block_device *bdev,
785 sector_t bblock, unsigned blocksize)
786{
787 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
788 if (bh) {
789 if (buffer_dirty(bh))
790 ll_rw_block(WRITE, 1, &bh);
791 put_bh(bh);
792 }
793}
794
795void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
796{
797 struct address_space *mapping = inode->i_mapping;
798 struct address_space *buffer_mapping = bh->b_page->mapping;
799
800 mark_buffer_dirty(bh);
801 if (!mapping->assoc_mapping) {
802 mapping->assoc_mapping = buffer_mapping;
803 } else {
804 if (mapping->assoc_mapping != buffer_mapping)
805 BUG();
806 }
807 if (list_empty(&bh->b_assoc_buffers)) {
808 spin_lock(&buffer_mapping->private_lock);
809 list_move_tail(&bh->b_assoc_buffers,
810 &mapping->private_list);
811 spin_unlock(&buffer_mapping->private_lock);
812 }
813}
814EXPORT_SYMBOL(mark_buffer_dirty_inode);
815
816/*
817 * Add a page to the dirty page list.
818 *
819 * It is a sad fact of life that this function is called from several places
820 * deeply under spinlocking. It may not sleep.
821 *
822 * If the page has buffers, the uptodate buffers are set dirty, to preserve
823 * dirty-state coherency between the page and the buffers. If the page does
824 * not have buffers then when they are later attached they will all be set
825 * dirty.
826 *
827 * The buffers are dirtied before the page is dirtied. There's a small race
828 * window in which a writepage caller may see the page cleanness but not the
829 * buffer dirtiness. That's fine. If this code were to set the page dirty
830 * before the buffers, a concurrent writepage caller could clear the page dirty
831 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
832 * page on the dirty page list.
833 *
834 * We use private_lock to lock against try_to_free_buffers while using the
835 * page's buffer list. Also use this to protect against clean buffers being
836 * added to the page after it was set dirty.
837 *
838 * FIXME: may need to call ->reservepage here as well. That's rather up to the
839 * address_space though.
840 */
841int __set_page_dirty_buffers(struct page *page)
842{
843 struct address_space * const mapping = page->mapping;
844
845 spin_lock(&mapping->private_lock);
846 if (page_has_buffers(page)) {
847 struct buffer_head *head = page_buffers(page);
848 struct buffer_head *bh = head;
849
850 do {
851 set_buffer_dirty(bh);
852 bh = bh->b_this_page;
853 } while (bh != head);
854 }
855 spin_unlock(&mapping->private_lock);
856
857 if (!TestSetPageDirty(page)) {
858 write_lock_irq(&mapping->tree_lock);
859 if (page->mapping) { /* Race with truncate? */
860 if (mapping_cap_account_dirty(mapping))
861 inc_page_state(nr_dirty);
862 radix_tree_tag_set(&mapping->page_tree,
863 page_index(page),
864 PAGECACHE_TAG_DIRTY);
865 }
866 write_unlock_irq(&mapping->tree_lock);
867 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
868 }
869
870 return 0;
871}
872EXPORT_SYMBOL(__set_page_dirty_buffers);
873
874/*
875 * Write out and wait upon a list of buffers.
876 *
877 * We have conflicting pressures: we want to make sure that all
878 * initially dirty buffers get waited on, but that any subsequently
879 * dirtied buffers don't. After all, we don't want fsync to last
880 * forever if somebody is actively writing to the file.
881 *
882 * Do this in two main stages: first we copy dirty buffers to a
883 * temporary inode list, queueing the writes as we go. Then we clean
884 * up, waiting for those writes to complete.
885 *
886 * During this second stage, any subsequent updates to the file may end
887 * up refiling the buffer on the original inode's dirty list again, so
888 * there is a chance we will end up with a buffer queued for write but
889 * not yet completed on that list. So, as a final cleanup we go through
890 * the osync code to catch these locked, dirty buffers without requeuing
891 * any newly dirty buffers for write.
892 */
893static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
894{
895 struct buffer_head *bh;
896 struct list_head tmp;
897 int err = 0, err2;
898
899 INIT_LIST_HEAD(&tmp);
900
901 spin_lock(lock);
902 while (!list_empty(list)) {
903 bh = BH_ENTRY(list->next);
904 list_del_init(&bh->b_assoc_buffers);
905 if (buffer_dirty(bh) || buffer_locked(bh)) {
906 list_add(&bh->b_assoc_buffers, &tmp);
907 if (buffer_dirty(bh)) {
908 get_bh(bh);
909 spin_unlock(lock);
910 /*
911 * Ensure any pending I/O completes so that
912 * ll_rw_block() actually writes the current
913 * contents - it is a noop if I/O is still in
914 * flight on potentially older contents.
915 */
916 ll_rw_block(SWRITE, 1, &bh);
917 brelse(bh);
918 spin_lock(lock);
919 }
920 }
921 }
922
923 while (!list_empty(&tmp)) {
924 bh = BH_ENTRY(tmp.prev);
925 __remove_assoc_queue(bh);
926 get_bh(bh);
927 spin_unlock(lock);
928 wait_on_buffer(bh);
929 if (!buffer_uptodate(bh))
930 err = -EIO;
931 brelse(bh);
932 spin_lock(lock);
933 }
934
935 spin_unlock(lock);
936 err2 = osync_buffers_list(lock, list);
937 if (err)
938 return err;
939 else
940 return err2;
941}
942
943/*
944 * Invalidate any and all dirty buffers on a given inode. We are
945 * probably unmounting the fs, but that doesn't mean we have already
946 * done a sync(). Just drop the buffers from the inode list.
947 *
948 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
949 * assumes that all the buffers are against the blockdev. Not true
950 * for reiserfs.
951 */
952void invalidate_inode_buffers(struct inode *inode)
953{
954 if (inode_has_buffers(inode)) {
955 struct address_space *mapping = &inode->i_data;
956 struct list_head *list = &mapping->private_list;
957 struct address_space *buffer_mapping = mapping->assoc_mapping;
958
959 spin_lock(&buffer_mapping->private_lock);
960 while (!list_empty(list))
961 __remove_assoc_queue(BH_ENTRY(list->next));
962 spin_unlock(&buffer_mapping->private_lock);
963 }
964}
965
966/*
967 * Remove any clean buffers from the inode's buffer list. This is called
968 * when we're trying to free the inode itself. Those buffers can pin it.
969 *
970 * Returns true if all buffers were removed.
971 */
972int remove_inode_buffers(struct inode *inode)
973{
974 int ret = 1;
975
976 if (inode_has_buffers(inode)) {
977 struct address_space *mapping = &inode->i_data;
978 struct list_head *list = &mapping->private_list;
979 struct address_space *buffer_mapping = mapping->assoc_mapping;
980
981 spin_lock(&buffer_mapping->private_lock);
982 while (!list_empty(list)) {
983 struct buffer_head *bh = BH_ENTRY(list->next);
984 if (buffer_dirty(bh)) {
985 ret = 0;
986 break;
987 }
988 __remove_assoc_queue(bh);
989 }
990 spin_unlock(&buffer_mapping->private_lock);
991 }
992 return ret;
993}
994
995/*
996 * Create the appropriate buffers when given a page for the data area and
997 * the size of each buffer. Use the bh->b_this_page linked list to
998 * follow the buffers created. Return NULL if unable to create more
999 * buffers.
1000 *
1001 * The retry flag is used to differentiate async IO (paging, swapping)
1002 * which may not fail from ordinary buffer allocations.
1003 */
1004struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1005 int retry)
1006{
1007 struct buffer_head *bh, *head;
1008 long offset;
1009
1010try_again:
1011 head = NULL;
1012 offset = PAGE_SIZE;
1013 while ((offset -= size) >= 0) {
1014 bh = alloc_buffer_head(GFP_NOFS);
1015 if (!bh)
1016 goto no_grow;
1017
1018 bh->b_bdev = NULL;
1019 bh->b_this_page = head;
1020 bh->b_blocknr = -1;
1021 head = bh;
1022
1023 bh->b_state = 0;
1024 atomic_set(&bh->b_count, 0);
1025 bh->b_size = size;
1026
1027 /* Link the buffer to its page */
1028 set_bh_page(bh, page, offset);
1029
1030 bh->b_end_io = NULL;
1031 }
1032 return head;
1033/*
1034 * In case anything failed, we just free everything we got.
1035 */
1036no_grow:
1037 if (head) {
1038 do {
1039 bh = head;
1040 head = head->b_this_page;
1041 free_buffer_head(bh);
1042 } while (head);
1043 }
1044
1045 /*
1046 * Return failure for non-async IO requests. Async IO requests
1047 * are not allowed to fail, so we have to wait until buffer heads
1048 * become available. But we don't want tasks sleeping with
1049 * partially complete buffers, so all were released above.
1050 */
1051 if (!retry)
1052 return NULL;
1053
1054 /* We're _really_ low on memory. Now we just
1055 * wait for old buffer heads to become free due to
1056 * finishing IO. Since this is an async request and
1057 * the reserve list is empty, we're sure there are
1058 * async buffer heads in use.
1059 */
1060 free_more_memory();
1061 goto try_again;
1062}
1063EXPORT_SYMBOL_GPL(alloc_page_buffers);
1064
1065static inline void
1066link_dev_buffers(struct page *page, struct buffer_head *head)
1067{
1068 struct buffer_head *bh, *tail;
1069
1070 bh = head;
1071 do {
1072 tail = bh;
1073 bh = bh->b_this_page;
1074 } while (bh);
1075 tail->b_this_page = head;
1076 attach_page_buffers(page, head);
1077}
1078
1079/*
1080 * Initialise the state of a blockdev page's buffers.
1081 */
1082static void
1083init_page_buffers(struct page *page, struct block_device *bdev,
1084 sector_t block, int size)
1085{
1086 struct buffer_head *head = page_buffers(page);
1087 struct buffer_head *bh = head;
1088 int uptodate = PageUptodate(page);
1089
1090 do {
1091 if (!buffer_mapped(bh)) {
1092 init_buffer(bh, NULL, NULL);
1093 bh->b_bdev = bdev;
1094 bh->b_blocknr = block;
1095 if (uptodate)
1096 set_buffer_uptodate(bh);
1097 set_buffer_mapped(bh);
1098 }
1099 block++;
1100 bh = bh->b_this_page;
1101 } while (bh != head);
1102}
1103
1104/*
1105 * Create the page-cache page that contains the requested block.
1106 *
1107 * This is used purely for blockdev mappings.
1108 */
1109static struct page *
1110grow_dev_page(struct block_device *bdev, sector_t block,
1111 pgoff_t index, int size)
1112{
1113 struct inode *inode = bdev->bd_inode;
1114 struct page *page;
1115 struct buffer_head *bh;
1116
1117 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1118 if (!page)
1119 return NULL;
1120
1121 if (!PageLocked(page))
1122 BUG();
1123
1124 if (page_has_buffers(page)) {
1125 bh = page_buffers(page);
1126 if (bh->b_size == size) {
1127 init_page_buffers(page, bdev, block, size);
1128 return page;
1129 }
1130 if (!try_to_free_buffers(page))
1131 goto failed;
1132 }
1133
1134 /*
1135 * Allocate some buffers for this page
1136 */
1137 bh = alloc_page_buffers(page, size, 0);
1138 if (!bh)
1139 goto failed;
1140
1141 /*
1142 * Link the page to the buffers and initialise them. Take the
1143 * lock to be atomic wrt __find_get_block(), which does not
1144 * run under the page lock.
1145 */
1146 spin_lock(&inode->i_mapping->private_lock);
1147 link_dev_buffers(page, bh);
1148 init_page_buffers(page, bdev, block, size);
1149 spin_unlock(&inode->i_mapping->private_lock);
1150 return page;
1151
1152failed:
1153 BUG();
1154 unlock_page(page);
1155 page_cache_release(page);
1156 return NULL;
1157}
1158
1159/*
1160 * Create buffers for the specified block device block's page. If
1161 * that page was dirty, the buffers are set dirty also.
1162 *
1163 * Except that's a bug. Attaching dirty buffers to a dirty
1164 * blockdev's page can result in filesystem corruption, because
1165 * some of those buffers may be aliases of filesystem data.
1166 * grow_dev_page() will go BUG() if this happens.
1167 */
1168static inline int
1169grow_buffers(struct block_device *bdev, sector_t block, int size)
1170{
1171 struct page *page;
1172 pgoff_t index;
1173 int sizebits;
1174
1175 sizebits = -1;
1176 do {
1177 sizebits++;
1178 } while ((size << sizebits) < PAGE_SIZE);
1179
1180 index = block >> sizebits;
1181 block = index << sizebits;
1182
1183 /* Create a page with the proper size buffers.. */
1184 page = grow_dev_page(bdev, block, index, size);
1185 if (!page)
1186 return 0;
1187 unlock_page(page);
1188 page_cache_release(page);
1189 return 1;
1190}
1191
1192static struct buffer_head *
1193__getblk_slow(struct block_device *bdev, sector_t block, int size)
1194{
1195 /* Size must be multiple of hard sectorsize */
1196 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1197 (size < 512 || size > PAGE_SIZE))) {
1198 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1199 size);
1200 printk(KERN_ERR "hardsect size: %d\n",
1201 bdev_hardsect_size(bdev));
1202
1203 dump_stack();
1204 return NULL;
1205 }
1206
1207 for (;;) {
1208 struct buffer_head * bh;
1209
1210 bh = __find_get_block(bdev, block, size);
1211 if (bh)
1212 return bh;
1213
1214 if (!grow_buffers(bdev, block, size))
1215 free_more_memory();
1216 }
1217}
1218
1219/*
1220 * The relationship between dirty buffers and dirty pages:
1221 *
1222 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1223 * the page is tagged dirty in its radix tree.
1224 *
1225 * At all times, the dirtiness of the buffers represents the dirtiness of
1226 * subsections of the page. If the page has buffers, the page dirty bit is
1227 * merely a hint about the true dirty state.
1228 *
1229 * When a page is set dirty in its entirety, all its buffers are marked dirty
1230 * (if the page has buffers).
1231 *
1232 * When a buffer is marked dirty, its page is dirtied, but the page's other
1233 * buffers are not.
1234 *
1235 * Also. When blockdev buffers are explicitly read with bread(), they
1236 * individually become uptodate. But their backing page remains not
1237 * uptodate - even if all of its buffers are uptodate. A subsequent
1238 * block_read_full_page() against that page will discover all the uptodate
1239 * buffers, will set the page uptodate and will perform no I/O.
1240 */
1241
1242/**
1243 * mark_buffer_dirty - mark a buffer_head as needing writeout
1244 * @bh: the buffer_head to mark dirty
1245 *
1246 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1247 * backing page dirty, then tag the page as dirty in its address_space's radix
1248 * tree and then attach the address_space's inode to its superblock's dirty
1249 * inode list.
1250 *
1251 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1252 * mapping->tree_lock and the global inode_lock.
1253 */
1254void fastcall mark_buffer_dirty(struct buffer_head *bh)
1255{
1256 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1257 __set_page_dirty_nobuffers(bh->b_page);
1258}
1259
1260/*
1261 * Decrement a buffer_head's reference count. If all buffers against a page
1262 * have zero reference count, are clean and unlocked, and if the page is clean
1263 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1264 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1265 * a page but it ends up not being freed, and buffers may later be reattached).
1266 */
1267void __brelse(struct buffer_head * buf)
1268{
1269 if (atomic_read(&buf->b_count)) {
1270 put_bh(buf);
1271 return;
1272 }
1273 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1274 WARN_ON(1);
1275}
1276
1277/*
1278 * bforget() is like brelse(), except it discards any
1279 * potentially dirty data.
1280 */
1281void __bforget(struct buffer_head *bh)
1282{
1283 clear_buffer_dirty(bh);
1284 if (!list_empty(&bh->b_assoc_buffers)) {
1285 struct address_space *buffer_mapping = bh->b_page->mapping;
1286
1287 spin_lock(&buffer_mapping->private_lock);
1288 list_del_init(&bh->b_assoc_buffers);
1289 spin_unlock(&buffer_mapping->private_lock);
1290 }
1291 __brelse(bh);
1292}
1293
1294static struct buffer_head *__bread_slow(struct buffer_head *bh)
1295{
1296 lock_buffer(bh);
1297 if (buffer_uptodate(bh)) {
1298 unlock_buffer(bh);
1299 return bh;
1300 } else {
1301 get_bh(bh);
1302 bh->b_end_io = end_buffer_read_sync;
1303 submit_bh(READ, bh);
1304 wait_on_buffer(bh);
1305 if (buffer_uptodate(bh))
1306 return bh;
1307 }
1308 brelse(bh);
1309 return NULL;
1310}
1311
1312/*
1313 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1314 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1315 * refcount elevated by one when they're in an LRU. A buffer can only appear
1316 * once in a particular CPU's LRU. A single buffer can be present in multiple
1317 * CPU's LRUs at the same time.
1318 *
1319 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1320 * sb_find_get_block().
1321 *
1322 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1323 * a local interrupt disable for that.
1324 */
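/*
 * For illustration only: a filesystem reading a metadata block will
 * normally go through the wrappers that sit on top of this cache, e.g.
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	...use bh->b_data...
 *	brelse(bh);
 *
 * Repeated lookups of hot blocks are then satisfied from the per-cpu
 * LRU below without touching the pagecache radix tree.
 */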
1325
1326#define BH_LRU_SIZE 8
1327
1328struct bh_lru {
1329 struct buffer_head *bhs[BH_LRU_SIZE];
1330};
1331
1332static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1333
1334#ifdef CONFIG_SMP
1335#define bh_lru_lock() local_irq_disable()
1336#define bh_lru_unlock() local_irq_enable()
1337#else
1338#define bh_lru_lock() preempt_disable()
1339#define bh_lru_unlock() preempt_enable()
1340#endif
1341
1342static inline void check_irqs_on(void)
1343{
1344#ifdef irqs_disabled
1345 BUG_ON(irqs_disabled());
1346#endif
1347}
1348
1349/*
1350 * The LRU management algorithm is dopey-but-simple. Sorry.
1351 */
1352static void bh_lru_install(struct buffer_head *bh)
1353{
1354 struct buffer_head *evictee = NULL;
1355 struct bh_lru *lru;
1356
1357 check_irqs_on();
1358 bh_lru_lock();
1359 lru = &__get_cpu_var(bh_lrus);
1360 if (lru->bhs[0] != bh) {
1361 struct buffer_head *bhs[BH_LRU_SIZE];
1362 int in;
1363 int out = 0;
1364
1365 get_bh(bh);
1366 bhs[out++] = bh;
1367 for (in = 0; in < BH_LRU_SIZE; in++) {
1368 struct buffer_head *bh2 = lru->bhs[in];
1369
1370 if (bh2 == bh) {
1371 __brelse(bh2);
1372 } else {
1373 if (out >= BH_LRU_SIZE) {
1374 BUG_ON(evictee != NULL);
1375 evictee = bh2;
1376 } else {
1377 bhs[out++] = bh2;
1378 }
1379 }
1380 }
1381 while (out < BH_LRU_SIZE)
1382 bhs[out++] = NULL;
1383 memcpy(lru->bhs, bhs, sizeof(bhs));
1384 }
1385 bh_lru_unlock();
1386
1387 if (evictee)
1388 __brelse(evictee);
1389}
1390
1391/*
1392 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1393 */
1394static inline struct buffer_head *
1395lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1396{
1397 struct buffer_head *ret = NULL;
1398 struct bh_lru *lru;
1399 int i;
1400
1401 check_irqs_on();
1402 bh_lru_lock();
1403 lru = &__get_cpu_var(bh_lrus);
1404 for (i = 0; i < BH_LRU_SIZE; i++) {
1405 struct buffer_head *bh = lru->bhs[i];
1406
1407 if (bh && bh->b_bdev == bdev &&
1408 bh->b_blocknr == block && bh->b_size == size) {
1409 if (i) {
1410 while (i) {
1411 lru->bhs[i] = lru->bhs[i - 1];
1412 i--;
1413 }
1414 lru->bhs[0] = bh;
1415 }
1416 get_bh(bh);
1417 ret = bh;
1418 break;
1419 }
1420 }
1421 bh_lru_unlock();
1422 return ret;
1423}
1424
1425/*
1426 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1427 * it in the LRU and mark it as accessed. If it is not present then return
1428 * NULL
1429 */
1430struct buffer_head *
1431__find_get_block(struct block_device *bdev, sector_t block, int size)
1432{
1433 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1434
1435 if (bh == NULL) {
1436 bh = __find_get_block_slow(bdev, block);
1437 if (bh)
1438 bh_lru_install(bh);
1439 }
1440 if (bh)
1441 touch_buffer(bh);
1442 return bh;
1443}
1444EXPORT_SYMBOL(__find_get_block);
1445
1446/*
1447 * __getblk will locate (and, if necessary, create) the buffer_head
1448 * which corresponds to the passed block_device, block and size. The
1449 * returned buffer has its reference count incremented.
1450 *
1451 * __getblk() cannot fail - it just keeps trying. If you pass it an
1452 * illegal block number, __getblk() will happily return a buffer_head
1453 * which represents the non-existent block. Very weird.
1454 *
1455 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1456 * attempt is failing. FIXME, perhaps?
1457 */
1458struct buffer_head *
1459__getblk(struct block_device *bdev, sector_t block, int size)
1460{
1461 struct buffer_head *bh = __find_get_block(bdev, block, size);
1462
1463 might_sleep();
1464 if (bh == NULL)
1465 bh = __getblk_slow(bdev, block, size);
1466 return bh;
1467}
1468EXPORT_SYMBOL(__getblk);
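/*
 * A sketch of typical __getblk() usage, for illustration: the caller is
 * responsible for bringing the buffer uptodate (or overwriting it
 * completely) before marking it dirty, e.g.
 *
 *	bh = __getblk(bdev, block, size);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * Callers which need the existing on-disk contents use __bread() below.
 */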
1469
1470/*
1471 * Do async read-ahead on a buffer..
1472 */
1473void __breadahead(struct block_device *bdev, sector_t block, int size)
1474{
1475 struct buffer_head *bh = __getblk(bdev, block, size);
1476 if (likely(bh)) {
1477 ll_rw_block(READA, 1, &bh);
1478 brelse(bh);
1479 }
1480}
1481EXPORT_SYMBOL(__breadahead);
1482
1483/**
1484 * __bread() - reads a specified block and returns the bh
1485 * @bdev: the block_device to read from
1486 * @block: number of block
1487 * @size: size (in bytes) to read
1488 *
1489 * Reads a specified block, and returns buffer head that contains it.
1490 * It returns NULL if the block was unreadable.
1491 */
1492struct buffer_head *
1493__bread(struct block_device *bdev, sector_t block, int size)
1494{
1495 struct buffer_head *bh = __getblk(bdev, block, size);
1496
1497 if (likely(bh) && !buffer_uptodate(bh))
1498 bh = __bread_slow(bh);
1499 return bh;
1500}
1501EXPORT_SYMBOL(__bread);
1502
1503/*
1504 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1505 * This doesn't race because it runs in each cpu either in irq
1506 * or with preempt disabled.
1507 */
1508static void invalidate_bh_lru(void *arg)
1509{
1510 struct bh_lru *b = &get_cpu_var(bh_lrus);
1511 int i;
1512
1513 for (i = 0; i < BH_LRU_SIZE; i++) {
1514 brelse(b->bhs[i]);
1515 b->bhs[i] = NULL;
1516 }
1517 put_cpu_var(bh_lrus);
1518}
1519
1520static void invalidate_bh_lrus(void)
1521{
1522 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1523}
1524
1525void set_bh_page(struct buffer_head *bh,
1526 struct page *page, unsigned long offset)
1527{
1528 bh->b_page = page;
1529 if (offset >= PAGE_SIZE)
1530 BUG();
1531 if (PageHighMem(page))
1532 /*
1533 * This catches illegal uses and preserves the offset:
1534 */
1535 bh->b_data = (char *)(0 + offset);
1536 else
1537 bh->b_data = page_address(page) + offset;
1538}
1539EXPORT_SYMBOL(set_bh_page);
1540
1541/*
1542 * Called when truncating a buffer on a page completely.
1543 */
1544static inline void discard_buffer(struct buffer_head * bh)
1545{
1546 lock_buffer(bh);
1547 clear_buffer_dirty(bh);
1548 bh->b_bdev = NULL;
1549 clear_buffer_mapped(bh);
1550 clear_buffer_req(bh);
1551 clear_buffer_new(bh);
1552 clear_buffer_delay(bh);
1553 unlock_buffer(bh);
1554}
1555
1556/**
1557 * try_to_release_page() - release old fs-specific metadata on a page
1558 *
1559 * @page: the page which the kernel is trying to free
1560 * @gfp_mask: memory allocation flags (and I/O mode)
1561 *
1562 * The address_space is to try to release any data against the page
1563 * (presumably at page->private). If the release was successful, return `1'.
1564 * Otherwise return zero.
1565 *
1566 * The @gfp_mask argument specifies whether I/O may be performed to release
1567 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1568 *
1569 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1570 */
1571int try_to_release_page(struct page *page, gfp_t gfp_mask)
1572{
1573 struct address_space * const mapping = page->mapping;
1574
1575 BUG_ON(!PageLocked(page));
1576 if (PageWriteback(page))
1577 return 0;
1578
1579 if (mapping && mapping->a_ops->releasepage)
1580 return mapping->a_ops->releasepage(page, gfp_mask);
1581 return try_to_free_buffers(page);
1582}
1583EXPORT_SYMBOL(try_to_release_page);
1584
1585/**
1586 * block_invalidatepage - invalidate part or all of a buffer-backed page
1587 *
1588 * @page: the page which is affected
1589 * @offset: the index of the truncation point
1590 *
1591 * block_invalidatepage() is called when all or part of the page has become
1592 * invalidated by a truncate operation.
1593 *
1594 * block_invalidatepage() does not have to release all buffers, but it must
1595 * ensure that no dirty buffer is left outside @offset and that no I/O
1596 * is underway against any of the blocks which are outside the truncation
1597 * point. Because the caller is about to free (and possibly reuse) those
1598 * blocks on-disk.
1599 */
1600int block_invalidatepage(struct page *page, unsigned long offset)
1601{
1602 struct buffer_head *head, *bh, *next;
1603 unsigned int curr_off = 0;
1604 int ret = 1;
1605
1606 BUG_ON(!PageLocked(page));
1607 if (!page_has_buffers(page))
1608 goto out;
1609
1610 head = page_buffers(page);
1611 bh = head;
1612 do {
1613 unsigned int next_off = curr_off + bh->b_size;
1614 next = bh->b_this_page;
1615
1616 /*
1617 * is this block fully invalidated?
1618 */
1619 if (offset <= curr_off)
1620 discard_buffer(bh);
1621 curr_off = next_off;
1622 bh = next;
1623 } while (bh != head);
1624
1625 /*
1626 * We release buffers only if the entire page is being invalidated.
1627 * The get_block cached value has been unconditionally invalidated,
1628 * so real IO is not possible anymore.
1629 */
1630 if (offset == 0)
1631 ret = try_to_release_page(page, 0);
1632out:
1633 return ret;
1634}
1635EXPORT_SYMBOL(block_invalidatepage);
1636
1637int do_invalidatepage(struct page *page, unsigned long offset)
1638{
1639 int (*invalidatepage)(struct page *, unsigned long);
1640 invalidatepage = page->mapping->a_ops->invalidatepage;
1641 if (invalidatepage == NULL)
1642 invalidatepage = block_invalidatepage;
1643 return (*invalidatepage)(page, offset);
1644}
1645
1646/*
1647 * We attach and possibly dirty the buffers atomically wrt
1648 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1649 * is already excluded via the page lock.
1650 */
1651void create_empty_buffers(struct page *page,
1652 unsigned long blocksize, unsigned long b_state)
1653{
1654 struct buffer_head *bh, *head, *tail;
1655
1656 head = alloc_page_buffers(page, blocksize, 1);
1657 bh = head;
1658 do {
1659 bh->b_state |= b_state;
1660 tail = bh;
1661 bh = bh->b_this_page;
1662 } while (bh);
1663 tail->b_this_page = head;
1664
1665 spin_lock(&page->mapping->private_lock);
1666 if (PageUptodate(page) || PageDirty(page)) {
1667 bh = head;
1668 do {
1669 if (PageDirty(page))
1670 set_buffer_dirty(bh);
1671 if (PageUptodate(page))
1672 set_buffer_uptodate(bh);
1673 bh = bh->b_this_page;
1674 } while (bh != head);
1675 }
1676 attach_page_buffers(page, head);
1677 spin_unlock(&page->mapping->private_lock);
1678}
1679EXPORT_SYMBOL(create_empty_buffers);
1680
1681/*
1682 * We are taking a block for data and we don't want any output from any
1683 * buffer-cache aliases starting from return from that function and
1684 * until the moment when something will explicitly mark the buffer
1685 * dirty (hopefully that will not happen until we free that block ;-)
1686 * We don't even need to mark it not-uptodate - nobody can expect
1687 * anything from a newly allocated buffer anyway. We used to use
1688 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1689 * don't want to mark the alias unmapped, for example - it would confuse
1690 * anyone who might pick it with bread() afterwards...
1691 *
1692 * Also.. Note that bforget() doesn't lock the buffer. So there can
1693 * be writeout I/O going on against recently-freed buffers. We don't
1694 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1695 * only if we really need to. That happens here.
1696 */
1697void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1698{
1699 struct buffer_head *old_bh;
1700
1701 might_sleep();
1702
1703 old_bh = __find_get_block_slow(bdev, block);
1704 if (old_bh) {
1705 clear_buffer_dirty(old_bh);
1706 wait_on_buffer(old_bh);
1707 clear_buffer_req(old_bh);
1708 __brelse(old_bh);
1709 }
1710}
1711EXPORT_SYMBOL(unmap_underlying_metadata);
1712
1713/*
1714 * NOTE! All mapped/uptodate combinations are valid:
1715 *
1716 * Mapped Uptodate Meaning
1717 *
1718 * No No "unknown" - must do get_block()
1719 * No Yes "hole" - zero-filled
1720 * Yes No "allocated" - allocated on disk, not read in
1721 * Yes Yes "valid" - allocated and up-to-date in memory.
1722 *
1723 * "Dirty" is valid only with the last case (mapped+uptodate).
1724 */
1725
1726/*
1727 * While block_write_full_page is writing back the dirty buffers under
1728 * the page lock, whoever dirtied the buffers may decide to clean them
1729 * again at any time. We handle that by only looking at the buffer
1730 * state inside lock_buffer().
1731 *
1732 * If block_write_full_page() is called for regular writeback
1733 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1734 * locked buffer. This only can happen if someone has written the buffer
1735 * directly, with submit_bh(). At the address_space level PageWriteback
1736 * prevents this contention from occurring.
1737 */
1738static int __block_write_full_page(struct inode *inode, struct page *page,
1739 get_block_t *get_block, struct writeback_control *wbc)
1740{
1741 int err;
1742 sector_t block;
1743 sector_t last_block;
1744 struct buffer_head *bh, *head;
1745 int nr_underway = 0;
1746
1747 BUG_ON(!PageLocked(page));
1748
1749 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1750
1751 if (!page_has_buffers(page)) {
1752 create_empty_buffers(page, 1 << inode->i_blkbits,
1753 (1 << BH_Dirty)|(1 << BH_Uptodate));
1754 }
1755
1756 /*
1757 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1758 * here, and the (potentially unmapped) buffers may become dirty at
1759 * any time. If a buffer becomes dirty here after we've inspected it
1760 * then we just miss that fact, and the page stays dirty.
1761 *
1762 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1763 * handle that here by just cleaning them.
1764 */
1765
Andrew Morton54b21a72006-01-08 01:03:05 -08001766 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 head = page_buffers(page);
1768 bh = head;
1769
1770 /*
1771 * Get all the dirty buffers mapped to disk addresses and
1772 * handle any aliases from the underlying blockdev's mapping.
1773 */
1774 do {
1775 if (block > last_block) {
1776 /*
1777 * mapped buffers outside i_size will occur, because
1778 * this page can be outside i_size when there is a
1779 * truncate in progress.
1780 */
1781 /*
1782 * The buffer was zeroed by block_write_full_page()
1783 */
1784 clear_buffer_dirty(bh);
1785 set_buffer_uptodate(bh);
1786 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1787 err = get_block(inode, block, bh, 1);
1788 if (err)
1789 goto recover;
1790 if (buffer_new(bh)) {
1791 /* blockdev mappings never come here */
1792 clear_buffer_new(bh);
1793 unmap_underlying_metadata(bh->b_bdev,
1794 bh->b_blocknr);
1795 }
1796 }
1797 bh = bh->b_this_page;
1798 block++;
1799 } while (bh != head);
1800
1801 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 if (!buffer_mapped(bh))
1803 continue;
1804 /*
1805 * If it's a fully non-blocking write attempt and we cannot
1806 * lock the buffer then redirty the page. Note that this can
1807 * potentially cause a busy-wait loop from pdflush and kswapd
1808 * activity, but those code paths have their own higher-level
1809 * throttling.
1810 */
1811 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1812 lock_buffer(bh);
1813 } else if (test_set_buffer_locked(bh)) {
1814 redirty_page_for_writepage(wbc, page);
1815 continue;
1816 }
1817 if (test_clear_buffer_dirty(bh)) {
1818 mark_buffer_async_write(bh);
1819 } else {
1820 unlock_buffer(bh);
1821 }
1822 } while ((bh = bh->b_this_page) != head);
1823
1824 /*
1825 * The page and its buffers are protected by PageWriteback(), so we can
1826 * drop the bh refcounts early.
1827 */
1828 BUG_ON(PageWriteback(page));
1829 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
1831 do {
1832 struct buffer_head *next = bh->b_this_page;
1833 if (buffer_async_write(bh)) {
1834 submit_bh(WRITE, bh);
1835 nr_underway++;
1836 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 bh = next;
1838 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001839 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840
1841 err = 0;
1842done:
1843 if (nr_underway == 0) {
1844 /*
1845 * The page was marked dirty, but the buffers were
1846 * clean. Someone wrote them back by hand with
1847 * ll_rw_block/submit_bh. A rare case.
1848 */
1849 int uptodate = 1;
1850 do {
1851 if (!buffer_uptodate(bh)) {
1852 uptodate = 0;
1853 break;
1854 }
1855 bh = bh->b_this_page;
1856 } while (bh != head);
1857 if (uptodate)
1858 SetPageUptodate(page);
1859 end_page_writeback(page);
1860 /*
1861 * The page and buffer_heads can be released at any time from
1862 * here on.
1863 */
1864 wbc->pages_skipped++; /* We didn't write this page */
1865 }
1866 return err;
1867
1868recover:
1869 /*
1870 * ENOSPC, or some other error. We may already have added some
1871 * blocks to the file, so we need to write these out to avoid
1872 * exposing stale data.
1873 * The page is currently locked and not marked for writeback
1874 */
1875 bh = head;
1876 /* Recovery: lock and submit the mapped buffers */
1877 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1879 lock_buffer(bh);
1880 mark_buffer_async_write(bh);
1881 } else {
1882 /*
1883 * The buffer may have been set dirty during
1884 * attachment to a dirty page.
1885 */
1886 clear_buffer_dirty(bh);
1887 }
1888 } while ((bh = bh->b_this_page) != head);
1889 SetPageError(page);
1890 BUG_ON(PageWriteback(page));
1891 set_page_writeback(page);
1892 unlock_page(page);
1893 do {
1894 struct buffer_head *next = bh->b_this_page;
1895 if (buffer_async_write(bh)) {
1896 clear_buffer_dirty(bh);
1897 submit_bh(WRITE, bh);
1898 nr_underway++;
1899 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 bh = next;
1901 } while (bh != head);
1902 goto done;
1903}
1904
1905static int __block_prepare_write(struct inode *inode, struct page *page,
1906 unsigned from, unsigned to, get_block_t *get_block)
1907{
1908 unsigned block_start, block_end;
1909 sector_t block;
1910 int err = 0;
1911 unsigned blocksize, bbits;
1912 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1913
1914 BUG_ON(!PageLocked(page));
1915 BUG_ON(from > PAGE_CACHE_SIZE);
1916 BUG_ON(to > PAGE_CACHE_SIZE);
1917 BUG_ON(from > to);
1918
1919 blocksize = 1 << inode->i_blkbits;
1920 if (!page_has_buffers(page))
1921 create_empty_buffers(page, blocksize, 0);
1922 head = page_buffers(page);
1923
1924 bbits = inode->i_blkbits;
1925 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1926
1927 for(bh = head, block_start = 0; bh != head || !block_start;
1928 block++, block_start=block_end, bh = bh->b_this_page) {
1929 block_end = block_start + blocksize;
1930 if (block_end <= from || block_start >= to) {
1931 if (PageUptodate(page)) {
1932 if (!buffer_uptodate(bh))
1933 set_buffer_uptodate(bh);
1934 }
1935 continue;
1936 }
1937 if (buffer_new(bh))
1938 clear_buffer_new(bh);
1939 if (!buffer_mapped(bh)) {
1940 err = get_block(inode, block, bh, 1);
1941 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001942 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 unmap_underlying_metadata(bh->b_bdev,
1945 bh->b_blocknr);
1946 if (PageUptodate(page)) {
1947 set_buffer_uptodate(bh);
1948 continue;
1949 }
1950 if (block_end > to || block_start < from) {
1951 void *kaddr;
1952
1953 kaddr = kmap_atomic(page, KM_USER0);
1954 if (block_end > to)
1955 memset(kaddr+to, 0,
1956 block_end-to);
1957 if (block_start < from)
1958 memset(kaddr+block_start,
1959 0, from-block_start);
1960 flush_dcache_page(page);
1961 kunmap_atomic(kaddr, KM_USER0);
1962 }
1963 continue;
1964 }
1965 }
1966 if (PageUptodate(page)) {
1967 if (!buffer_uptodate(bh))
1968 set_buffer_uptodate(bh);
1969 continue;
1970 }
1971 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1972 (block_start < from || block_end > to)) {
1973 ll_rw_block(READ, 1, &bh);
1974 *wait_bh++=bh;
1975 }
1976 }
1977 /*
1978 * If we issued read requests - let them complete.
1979 */
1980 while(wait_bh > wait) {
1981 wait_on_buffer(*--wait_bh);
1982 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001983 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 }
Anton Altaparmakov152becd2005-06-23 00:10:21 -07001985 if (!err) {
1986 bh = head;
1987 do {
1988 if (buffer_new(bh))
1989 clear_buffer_new(bh);
1990 } while ((bh = bh->b_this_page) != head);
1991 return 0;
1992 }
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001993 /* Error case: */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 /*
1995 * Zero out any newly allocated blocks to avoid exposing stale
1996 * data. If BH_New is set, we know that the block was newly
1997 * allocated in the above loop.
1998 */
1999 bh = head;
2000 block_start = 0;
2001 do {
2002 block_end = block_start+blocksize;
2003 if (block_end <= from)
2004 goto next_bh;
2005 if (block_start >= to)
2006 break;
2007 if (buffer_new(bh)) {
2008 void *kaddr;
2009
2010 clear_buffer_new(bh);
2011 kaddr = kmap_atomic(page, KM_USER0);
2012 memset(kaddr+block_start, 0, bh->b_size);
2013 kunmap_atomic(kaddr, KM_USER0);
2014 set_buffer_uptodate(bh);
2015 mark_buffer_dirty(bh);
2016 }
2017next_bh:
2018 block_start = block_end;
2019 bh = bh->b_this_page;
2020 } while (bh != head);
2021 return err;
2022}
2023
2024static int __block_commit_write(struct inode *inode, struct page *page,
2025 unsigned from, unsigned to)
2026{
2027 unsigned block_start, block_end;
2028 int partial = 0;
2029 unsigned blocksize;
2030 struct buffer_head *bh, *head;
2031
2032 blocksize = 1 << inode->i_blkbits;
2033
2034 for(bh = head = page_buffers(page), block_start = 0;
2035 bh != head || !block_start;
2036 block_start=block_end, bh = bh->b_this_page) {
2037 block_end = block_start + blocksize;
2038 if (block_end <= from || block_start >= to) {
2039 if (!buffer_uptodate(bh))
2040 partial = 1;
2041 } else {
2042 set_buffer_uptodate(bh);
2043 mark_buffer_dirty(bh);
2044 }
2045 }
2046
2047 /*
2048 * If this is a partial write which happened to make all buffers
2049 * uptodate then we can optimize away a bogus readpage() for
2050 * the next read(). Here we 'discover' whether the page went
2051 * uptodate as a result of this (potentially partial) write.
2052 */
2053 if (!partial)
2054 SetPageUptodate(page);
2055 return 0;
2056}
2057
2058/*
2059 * Generic "read page" function for block devices that have the normal
2060 * get_block functionality. This covers most block-based filesystems.
2061 * Reads the page asynchronously --- the unlock_buffer() and
2062 * set/clear_buffer_uptodate() functions propagate buffer state into the
2063 * page struct once IO has completed.
2064 */
2065int block_read_full_page(struct page *page, get_block_t *get_block)
2066{
2067 struct inode *inode = page->mapping->host;
2068 sector_t iblock, lblock;
2069 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2070 unsigned int blocksize;
2071 int nr, i;
2072 int fully_mapped = 1;
2073
Matt Mackallcd7619d2005-05-01 08:59:01 -07002074 BUG_ON(!PageLocked(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 blocksize = 1 << inode->i_blkbits;
2076 if (!page_has_buffers(page))
2077 create_empty_buffers(page, blocksize, 0);
2078 head = page_buffers(page);
2079
2080 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2081 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2082 bh = head;
2083 nr = 0;
2084 i = 0;
2085
2086 do {
2087 if (buffer_uptodate(bh))
2088 continue;
2089
2090 if (!buffer_mapped(bh)) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002091 int err = 0;
2092
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 fully_mapped = 0;
2094 if (iblock < lblock) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002095 err = get_block(inode, iblock, bh, 0);
2096 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 SetPageError(page);
2098 }
2099 if (!buffer_mapped(bh)) {
2100 void *kaddr = kmap_atomic(page, KM_USER0);
2101 memset(kaddr + i * blocksize, 0, blocksize);
2102 flush_dcache_page(page);
2103 kunmap_atomic(kaddr, KM_USER0);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002104 if (!err)
2105 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 continue;
2107 }
2108 /*
2109 * get_block() might have updated the buffer
2110 * synchronously
2111 */
2112 if (buffer_uptodate(bh))
2113 continue;
2114 }
2115 arr[nr++] = bh;
2116 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2117
2118 if (fully_mapped)
2119 SetPageMappedToDisk(page);
2120
2121 if (!nr) {
2122 /*
2123 * All buffers are uptodate - we can set the page uptodate
2124 * as well. But not if get_block() returned an error.
2125 */
2126 if (!PageError(page))
2127 SetPageUptodate(page);
2128 unlock_page(page);
2129 return 0;
2130 }
2131
2132 /* Stage two: lock the buffers */
2133 for (i = 0; i < nr; i++) {
2134 bh = arr[i];
2135 lock_buffer(bh);
2136 mark_buffer_async_read(bh);
2137 }
2138
2139 /*
2140 * Stage 3: start the IO. Check for uptodateness
2141 * inside the buffer lock in case another process reading
2142 * the underlying blockdev brought it uptodate (the sct fix).
2143 */
2144 for (i = 0; i < nr; i++) {
2145 bh = arr[i];
2146 if (buffer_uptodate(bh))
2147 end_buffer_async_read(bh, 1);
2148 else
2149 submit_bh(READ, bh);
2150 }
2151 return 0;
2152}
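
/*
 * Illustrative sketch: a get_block-based filesystem typically implements
 * ->readpage by forwarding straight to block_read_full_page().  The
 * myfs_get_block() callback is hypothetical.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, myfs_get_block);
}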
2153
2154/* utility function for filesystems that need to do work on expanding
2155 * truncates. Uses prepare/commit_write to allow the filesystem to
2156 * deal with the hole.
2157 */
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002158static int __generic_cont_expand(struct inode *inode, loff_t size,
2159 pgoff_t index, unsigned int offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160{
2161 struct address_space *mapping = inode->i_mapping;
2162 struct page *page;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002163 unsigned long limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 int err;
2165
2166 err = -EFBIG;
2167 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2168 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2169 send_sig(SIGXFSZ, current, 0);
2170 goto out;
2171 }
2172 if (size > inode->i_sb->s_maxbytes)
2173 goto out;
2174
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 err = -ENOMEM;
2176 page = grab_cache_page(mapping, index);
2177 if (!page)
2178 goto out;
2179 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002180 if (err) {
2181 /*
2182 * ->prepare_write() may have instantiated a few blocks
2183 * outside i_size. Trim these off again.
2184 */
2185 unlock_page(page);
2186 page_cache_release(page);
2187 vmtruncate(inode, inode->i_size);
2188 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 }
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002190
2191 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2192
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 unlock_page(page);
2194 page_cache_release(page);
2195 if (err > 0)
2196 err = 0;
2197out:
2198 return err;
2199}
2200
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002201int generic_cont_expand(struct inode *inode, loff_t size)
2202{
2203 pgoff_t index;
2204 unsigned int offset;
2205
2206 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2207
2208 /* Ugh. In prepare/commit_write, if from==to==start of block, we
2209 * skip the prepare. Make sure we never send an offset for the start
2210 * of a block.
2211 */
2212 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2213 /* caller must handle this extra byte. */
2214 offset++;
2215 }
2216 index = size >> PAGE_CACHE_SHIFT;
2217
2218 return __generic_cont_expand(inode, size, index, offset);
2219}
2220
2221int generic_cont_expand_simple(struct inode *inode, loff_t size)
2222{
2223 loff_t pos = size - 1;
2224 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2225 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2226
2227 /* prepare/commit_write can handle even if from==to==start of block. */
2228 return __generic_cont_expand(inode, size, index, offset);
2229}
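
/*
 * Illustrative sketch: a filesystem that cannot represent holes can grow a
 * file on truncate-up by zero-filling out to the new size with the helper
 * above (locking against concurrent writers is the caller's problem).
 */
static int myfs_grow(struct inode *inode, loff_t new_size)
{
        /* instantiates and zeroes blocks; i_size is updated via commit_write */
        return generic_cont_expand(inode, new_size);
}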
2230
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231/*
2232 * For moronic filesystems that do not allow holes in files.
2233 * We may have to extend the file.
2234 */
2235
2236int cont_prepare_write(struct page *page, unsigned offset,
2237 unsigned to, get_block_t *get_block, loff_t *bytes)
2238{
2239 struct address_space *mapping = page->mapping;
2240 struct inode *inode = mapping->host;
2241 struct page *new_page;
2242 pgoff_t pgpos;
2243 long status;
2244 unsigned zerofrom;
2245 unsigned blocksize = 1 << inode->i_blkbits;
2246 void *kaddr;
2247
2248 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2249 status = -ENOMEM;
2250 new_page = grab_cache_page(mapping, pgpos);
2251 if (!new_page)
2252 goto out;
2253 /* we might sleep */
2254 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2255 unlock_page(new_page);
2256 page_cache_release(new_page);
2257 continue;
2258 }
2259 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2260 if (zerofrom & (blocksize-1)) {
2261 *bytes |= (blocksize-1);
2262 (*bytes)++;
2263 }
2264 status = __block_prepare_write(inode, new_page, zerofrom,
2265 PAGE_CACHE_SIZE, get_block);
2266 if (status)
2267 goto out_unmap;
2268 kaddr = kmap_atomic(new_page, KM_USER0);
2269 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2270 flush_dcache_page(new_page);
2271 kunmap_atomic(kaddr, KM_USER0);
2272 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2273 unlock_page(new_page);
2274 page_cache_release(new_page);
2275 }
2276
2277 if (page->index < pgpos) {
2278 /* completely inside the area */
2279 zerofrom = offset;
2280 } else {
2281 /* page covers the boundary, find the boundary offset */
2282 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2283
2284 /* if we will expand the thing last block will be filled */
2285 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2286 *bytes |= (blocksize-1);
2287 (*bytes)++;
2288 }
2289
2290 /* starting below the boundary? Nothing to zero out */
2291 if (offset <= zerofrom)
2292 zerofrom = offset;
2293 }
2294 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2295 if (status)
2296 goto out1;
2297 if (zerofrom < offset) {
2298 kaddr = kmap_atomic(page, KM_USER0);
2299 memset(kaddr+zerofrom, 0, offset-zerofrom);
2300 flush_dcache_page(page);
2301 kunmap_atomic(kaddr, KM_USER0);
2302 __block_commit_write(inode, page, zerofrom, offset);
2303 }
2304 return 0;
2305out1:
2306 ClearPageUptodate(page);
2307 return status;
2308
2309out_unmap:
2310 ClearPageUptodate(new_page);
2311 unlock_page(new_page);
2312 page_cache_release(new_page);
2313out:
2314 return status;
2315}
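
/*
 * Illustrative sketch: a hole-less (FAT-like) filesystem wires this into
 * ->prepare_write, passing a pointer to a per-inode "zeroed up to here"
 * offset.  MYFS_I() and its mmu_private field, like myfs_get_block(), are
 * hypothetical stand-ins for the filesystem's own bookkeeping.
 */
static int myfs_cont_prepare_write(struct file *file, struct page *page,
                        unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;

        return cont_prepare_write(page, from, to, myfs_get_block,
                                  &MYFS_I(inode)->mmu_private);
}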
2316
2317int block_prepare_write(struct page *page, unsigned from, unsigned to,
2318 get_block_t *get_block)
2319{
2320 struct inode *inode = page->mapping->host;
2321 int err = __block_prepare_write(inode, page, from, to, get_block);
2322 if (err)
2323 ClearPageUptodate(page);
2324 return err;
2325}
2326
2327int block_commit_write(struct page *page, unsigned from, unsigned to)
2328{
2329 struct inode *inode = page->mapping->host;
2330 __block_commit_write(inode,page,from,to);
2331 return 0;
2332}
2333
2334int generic_commit_write(struct file *file, struct page *page,
2335 unsigned from, unsigned to)
2336{
2337 struct inode *inode = page->mapping->host;
2338 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2339 __block_commit_write(inode,page,from,to);
2340 /*
2341 * No need to use i_size_read() here, the i_size
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08002342 * cannot change under us because we hold i_mutex.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 */
2344 if (pos > inode->i_size) {
2345 i_size_write(inode, pos);
2346 mark_inode_dirty(inode);
2347 }
2348 return 0;
2349}
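
/*
 * Illustrative sketch: the usual pairing for a buffer-head based filesystem
 * is a thin ->prepare_write wrapper around block_prepare_write(), with
 * generic_commit_write() itself used as ->commit_write.  myfs_get_block()
 * is hypothetical.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
                        unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, myfs_get_block);
}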
2350
2351
2352/*
2353 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2354 * immediately, while under the page lock. So it needs a special end_io
2355 * handler which does not touch the bh after unlocking it.
2356 *
2357 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2358 * a race there is benign: unlock_buffer() only uses the bh's address for
2359 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2360 * itself.
2361 */
2362static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2363{
2364 if (uptodate) {
2365 set_buffer_uptodate(bh);
2366 } else {
2367 /* This happens, due to failed READA attempts. */
2368 clear_buffer_uptodate(bh);
2369 }
2370 unlock_buffer(bh);
2371}
2372
2373/*
2374 * On entry, the page is fully not uptodate.
2375 * On exit the page is fully uptodate in the areas outside (from,to)
2376 */
2377int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2378 get_block_t *get_block)
2379{
2380 struct inode *inode = page->mapping->host;
2381 const unsigned blkbits = inode->i_blkbits;
2382 const unsigned blocksize = 1 << blkbits;
2383 struct buffer_head map_bh;
2384 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2385 unsigned block_in_page;
2386 unsigned block_start;
2387 sector_t block_in_file;
2388 char *kaddr;
2389 int nr_reads = 0;
2390 int i;
2391 int ret = 0;
2392 int is_mapped_to_disk = 1;
2393 int dirtied_it = 0;
2394
2395 if (PageMappedToDisk(page))
2396 return 0;
2397
2398 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2399 map_bh.b_page = page;
2400
2401 /*
2402 * We loop across all blocks in the page, whether or not they are
2403 * part of the affected region. This is so we can discover if the
2404 * page is fully mapped-to-disk.
2405 */
2406 for (block_start = 0, block_in_page = 0;
2407 block_start < PAGE_CACHE_SIZE;
2408 block_in_page++, block_start += blocksize) {
2409 unsigned block_end = block_start + blocksize;
2410 int create;
2411
2412 map_bh.b_state = 0;
2413 create = 1;
2414 if (block_start >= to)
2415 create = 0;
2416 ret = get_block(inode, block_in_file + block_in_page,
2417 &map_bh, create);
2418 if (ret)
2419 goto failed;
2420 if (!buffer_mapped(&map_bh))
2421 is_mapped_to_disk = 0;
2422 if (buffer_new(&map_bh))
2423 unmap_underlying_metadata(map_bh.b_bdev,
2424 map_bh.b_blocknr);
2425 if (PageUptodate(page))
2426 continue;
2427 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2428 kaddr = kmap_atomic(page, KM_USER0);
2429 if (block_start < from) {
2430 memset(kaddr+block_start, 0, from-block_start);
2431 dirtied_it = 1;
2432 }
2433 if (block_end > to) {
2434 memset(kaddr + to, 0, block_end - to);
2435 dirtied_it = 1;
2436 }
2437 flush_dcache_page(page);
2438 kunmap_atomic(kaddr, KM_USER0);
2439 continue;
2440 }
2441 if (buffer_uptodate(&map_bh))
2442 continue; /* reiserfs does this */
2443 if (block_start < from || block_end > to) {
2444 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2445
2446 if (!bh) {
2447 ret = -ENOMEM;
2448 goto failed;
2449 }
2450 bh->b_state = map_bh.b_state;
2451 atomic_set(&bh->b_count, 0);
2452 bh->b_this_page = NULL;
2453 bh->b_page = page;
2454 bh->b_blocknr = map_bh.b_blocknr;
2455 bh->b_size = blocksize;
2456 bh->b_data = (char *)(long)block_start;
2457 bh->b_bdev = map_bh.b_bdev;
2458 bh->b_private = NULL;
2459 read_bh[nr_reads++] = bh;
2460 }
2461 }
2462
2463 if (nr_reads) {
2464 struct buffer_head *bh;
2465
2466 /*
2467 * The page is locked, so these buffers are protected from
2468 * any VM or truncate activity. Hence we don't need to care
2469 * for the buffer_head refcounts.
2470 */
2471 for (i = 0; i < nr_reads; i++) {
2472 bh = read_bh[i];
2473 lock_buffer(bh);
2474 bh->b_end_io = end_buffer_read_nobh;
2475 submit_bh(READ, bh);
2476 }
2477 for (i = 0; i < nr_reads; i++) {
2478 bh = read_bh[i];
2479 wait_on_buffer(bh);
2480 if (!buffer_uptodate(bh))
2481 ret = -EIO;
2482 free_buffer_head(bh);
2483 read_bh[i] = NULL;
2484 }
2485 if (ret)
2486 goto failed;
2487 }
2488
2489 if (is_mapped_to_disk)
2490 SetPageMappedToDisk(page);
2491 SetPageUptodate(page);
2492
2493 /*
2494 * Setting the page dirty here isn't necessary for the prepare_write
2495 * function - commit_write will do that. But if/when this function is
2496 * used within the pagefault handler to ensure that all mmapped pages
2497 * have backing space in the filesystem, we will need to dirty the page
2498 * if its contents were altered.
2499 */
2500 if (dirtied_it)
2501 set_page_dirty(page);
2502
2503 return 0;
2504
2505failed:
2506 for (i = 0; i < nr_reads; i++) {
2507 if (read_bh[i])
2508 free_buffer_head(read_bh[i]);
2509 }
2510
2511 /*
2512 * Error recovery is pretty slack. Clear the page and mark it dirty
2513 * so we'll later zero out any blocks which _were_ allocated.
2514 */
2515 kaddr = kmap_atomic(page, KM_USER0);
2516 memset(kaddr, 0, PAGE_CACHE_SIZE);
2517 kunmap_atomic(kaddr, KM_USER0);
2518 SetPageUptodate(page);
2519 set_page_dirty(page);
2520 return ret;
2521}
2522EXPORT_SYMBOL(nobh_prepare_write);
2523
2524int nobh_commit_write(struct file *file, struct page *page,
2525 unsigned from, unsigned to)
2526{
2527 struct inode *inode = page->mapping->host;
2528 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2529
2530 set_page_dirty(page);
2531 if (pos > inode->i_size) {
2532 i_size_write(inode, pos);
2533 mark_inode_dirty(inode);
2534 }
2535 return 0;
2536}
2537EXPORT_SYMBOL(nobh_commit_write);
2538
2539/*
2540 * nobh_writepage() - based on block_write_full_page() except
2541 * that it tries to operate without attaching bufferheads to
2542 * the page.
2543 */
2544int nobh_writepage(struct page *page, get_block_t *get_block,
2545 struct writeback_control *wbc)
2546{
2547 struct inode * const inode = page->mapping->host;
2548 loff_t i_size = i_size_read(inode);
2549 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2550 unsigned offset;
2551 void *kaddr;
2552 int ret;
2553
2554 /* Is the page fully inside i_size? */
2555 if (page->index < end_index)
2556 goto out;
2557
2558 /* Is the page fully outside i_size? (truncate in progress) */
2559 offset = i_size & (PAGE_CACHE_SIZE-1);
2560 if (page->index >= end_index+1 || !offset) {
2561 /*
2562 * The page may have dirty, unmapped buffers. For example,
2563 * they may have been added in ext3_writepage(). Make them
2564 * freeable here, so the page does not leak.
2565 */
2566#if 0
2567 /* Not really sure about this - do we need this ? */
2568 if (page->mapping->a_ops->invalidatepage)
2569 page->mapping->a_ops->invalidatepage(page, offset);
2570#endif
2571 unlock_page(page);
2572 return 0; /* don't care */
2573 }
2574
2575 /*
2576 * The page straddles i_size. It must be zeroed out on each and every
2577 * writepage invocation because it may be mmapped. "A file is mapped
2578 * in multiples of the page size. For a file that is not a multiple of
2579 * the page size, the remaining memory is zeroed when mapped, and
2580 * writes to that region are not written out to the file."
2581 */
2582 kaddr = kmap_atomic(page, KM_USER0);
2583 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2584 flush_dcache_page(page);
2585 kunmap_atomic(kaddr, KM_USER0);
2586out:
2587 ret = mpage_writepage(page, get_block, wbc);
2588 if (ret == -EAGAIN)
2589 ret = __block_write_full_page(inode, page, get_block, wbc);
2590 return ret;
2591}
2592EXPORT_SYMBOL(nobh_writepage);
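
/*
 * Illustrative sketch: the nobh helpers are wired up much like the
 * buffer-head versions; nobh_commit_write() can be used as ->commit_write
 * directly.  The myfs_* names and myfs_get_block() are hypothetical.
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
                        unsigned from, unsigned to)
{
        return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
                        struct writeback_control *wbc)
{
        return nobh_writepage(page, myfs_get_block, wbc);
}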
2593
2594/*
2595 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2596 */
2597int nobh_truncate_page(struct address_space *mapping, loff_t from)
2598{
2599 struct inode *inode = mapping->host;
2600 unsigned blocksize = 1 << inode->i_blkbits;
2601 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2602 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2603 unsigned to;
2604 struct page *page;
2605 struct address_space_operations *a_ops = mapping->a_ops;
2606 char *kaddr;
2607 int ret = 0;
2608
2609 if ((offset & (blocksize - 1)) == 0)
2610 goto out;
2611
2612 ret = -ENOMEM;
2613 page = grab_cache_page(mapping, index);
2614 if (!page)
2615 goto out;
2616
2617 to = (offset + blocksize) & ~(blocksize - 1);
2618 ret = a_ops->prepare_write(NULL, page, offset, to);
2619 if (ret == 0) {
2620 kaddr = kmap_atomic(page, KM_USER0);
2621 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2622 flush_dcache_page(page);
2623 kunmap_atomic(kaddr, KM_USER0);
2624 set_page_dirty(page);
2625 }
2626 unlock_page(page);
2627 page_cache_release(page);
2628out:
2629 return ret;
2630}
2631EXPORT_SYMBOL(nobh_truncate_page);
2632
2633int block_truncate_page(struct address_space *mapping,
2634 loff_t from, get_block_t *get_block)
2635{
2636 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2637 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2638 unsigned blocksize;
Andrew Morton54b21a72006-01-08 01:03:05 -08002639 sector_t iblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 unsigned length, pos;
2641 struct inode *inode = mapping->host;
2642 struct page *page;
2643 struct buffer_head *bh;
2644 void *kaddr;
2645 int err;
2646
2647 blocksize = 1 << inode->i_blkbits;
2648 length = offset & (blocksize - 1);
2649
2650 /* Block boundary? Nothing to do */
2651 if (!length)
2652 return 0;
2653
2654 length = blocksize - length;
Andrew Morton54b21a72006-01-08 01:03:05 -08002655 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656
2657 page = grab_cache_page(mapping, index);
2658 err = -ENOMEM;
2659 if (!page)
2660 goto out;
2661
2662 if (!page_has_buffers(page))
2663 create_empty_buffers(page, blocksize, 0);
2664
2665 /* Find the buffer that contains "offset" */
2666 bh = page_buffers(page);
2667 pos = blocksize;
2668 while (offset >= pos) {
2669 bh = bh->b_this_page;
2670 iblock++;
2671 pos += blocksize;
2672 }
2673
2674 err = 0;
2675 if (!buffer_mapped(bh)) {
2676 err = get_block(inode, iblock, bh, 0);
2677 if (err)
2678 goto unlock;
2679 /* unmapped? It's a hole - nothing to do */
2680 if (!buffer_mapped(bh))
2681 goto unlock;
2682 }
2683
2684 /* Ok, it's mapped. Make sure it's up-to-date */
2685 if (PageUptodate(page))
2686 set_buffer_uptodate(bh);
2687
2688 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2689 err = -EIO;
2690 ll_rw_block(READ, 1, &bh);
2691 wait_on_buffer(bh);
2692 /* Uhhuh. Read error. Complain and punt. */
2693 if (!buffer_uptodate(bh))
2694 goto unlock;
2695 }
2696
2697 kaddr = kmap_atomic(page, KM_USER0);
2698 memset(kaddr + offset, 0, length);
2699 flush_dcache_page(page);
2700 kunmap_atomic(kaddr, KM_USER0);
2701
2702 mark_buffer_dirty(bh);
2703 err = 0;
2704
2705unlock:
2706 unlock_page(page);
2707 page_cache_release(page);
2708out:
2709 return err;
2710}
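
/*
 * Illustrative sketch: a filesystem's truncate path uses this helper to zero
 * the partial block at the new end-of-file before releasing blocks beyond
 * it.  myfs_get_block() and myfs_free_blocks() are hypothetical.
 */
static void myfs_truncate(struct inode *inode)
{
        block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
        myfs_free_blocks(inode, inode->i_size); /* hypothetical */
        mark_inode_dirty(inode);
}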
2711
2712/*
2713 * The generic ->writepage function for buffer-backed address_spaces
2714 */
2715int block_write_full_page(struct page *page, get_block_t *get_block,
2716 struct writeback_control *wbc)
2717{
2718 struct inode * const inode = page->mapping->host;
2719 loff_t i_size = i_size_read(inode);
2720 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2721 unsigned offset;
2722 void *kaddr;
2723
2724 /* Is the page fully inside i_size? */
2725 if (page->index < end_index)
2726 return __block_write_full_page(inode, page, get_block, wbc);
2727
2728 /* Is the page fully outside i_size? (truncate in progress) */
2729 offset = i_size & (PAGE_CACHE_SIZE-1);
2730 if (page->index >= end_index+1 || !offset) {
2731 /*
2732 * The page may have dirty, unmapped buffers. For example,
2733 * they may have been added in ext3_writepage(). Make them
2734 * freeable here, so the page does not leak.
2735 */
Jan Karaaaa40592005-10-30 15:00:16 -08002736 do_invalidatepage(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 unlock_page(page);
2738 return 0; /* don't care */
2739 }
2740
2741 /*
2742 * The page straddles i_size. It must be zeroed out on each and every
2743 * writepage invocation because it may be mmapped. "A file is mapped
2744 * in multiples of the page size. For a file that is not a multiple of
2745 * the page size, the remaining memory is zeroed when mapped, and
2746 * writes to that region are not written out to the file."
2747 */
2748 kaddr = kmap_atomic(page, KM_USER0);
2749 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2750 flush_dcache_page(page);
2751 kunmap_atomic(kaddr, KM_USER0);
2752 return __block_write_full_page(inode, page, get_block, wbc);
2753}
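
/*
 * Illustrative sketch: as with ->readpage above, ->writepage usually just
 * forwards here with the filesystem's (hypothetical) get_block callback.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, myfs_get_block, wbc);
}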
2754
2755sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2756 get_block_t *get_block)
2757{
2758 struct buffer_head tmp;
2759 struct inode *inode = mapping->host;
2760 tmp.b_state = 0;
2761 tmp.b_blocknr = 0;
2762 get_block(inode, block, &tmp, 0);
2763 return tmp.b_blocknr;
2764}
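
/*
 * Illustrative sketch: the ->bmap address_space operation is another thin
 * forwarder (myfs_get_block() is hypothetical).
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, myfs_get_block);
}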
2765
2766static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2767{
2768 struct buffer_head *bh = bio->bi_private;
2769
2770 if (bio->bi_size)
2771 return 1;
2772
2773 if (err == -EOPNOTSUPP) {
2774 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2775 set_bit(BH_Eopnotsupp, &bh->b_state);
2776 }
2777
2778 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2779 bio_put(bio);
2780 return 0;
2781}
2782
2783int submit_bh(int rw, struct buffer_head * bh)
2784{
2785 struct bio *bio;
2786 int ret = 0;
2787
2788 BUG_ON(!buffer_locked(bh));
2789 BUG_ON(!buffer_mapped(bh));
2790 BUG_ON(!bh->b_end_io);
2791
2792 if (buffer_ordered(bh) && (rw == WRITE))
2793 rw = WRITE_BARRIER;
2794
2795 /*
2796 * Only clear out a write error when rewriting, should this
2797 * include WRITE_SYNC as well?
2798 */
2799 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2800 clear_buffer_write_io_error(bh);
2801
2802 /*
2803 * from here on down, it's all bio -- do the initial mapping,
2804 * submit_bio -> generic_make_request may further map this bio around
2805 */
2806 bio = bio_alloc(GFP_NOIO, 1);
2807
2808 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2809 bio->bi_bdev = bh->b_bdev;
2810 bio->bi_io_vec[0].bv_page = bh->b_page;
2811 bio->bi_io_vec[0].bv_len = bh->b_size;
2812 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2813
2814 bio->bi_vcnt = 1;
2815 bio->bi_idx = 0;
2816 bio->bi_size = bh->b_size;
2817
2818 bio->bi_end_io = end_bio_bh_io_sync;
2819 bio->bi_private = bh;
2820
2821 bio_get(bio);
2822 submit_bio(rw, bio);
2823
2824 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2825 ret = -EOPNOTSUPP;
2826
2827 bio_put(bio);
2828 return ret;
2829}
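
/*
 * Illustrative sketch: reading a single, already-mapped buffer synchronously
 * with submit_bh(), using the simple completion handler from this file
 * (which drops the reference taken here).
 */
static int myfs_read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}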
2830
2831/**
2832 * ll_rw_block: low-level access to block devices (DEPRECATED)
Jan Karaa7662232005-09-06 15:19:10 -07002833 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 * @nr: number of &struct buffer_heads in the array
2835 * @bhs: array of pointers to &struct buffer_head
2836 *
Jan Karaa7662232005-09-06 15:19:10 -07002837 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2838 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2839 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2840 * is sent to disk. The fourth %READA option is described in the documentation
2841 * for generic_make_request() which ll_rw_block() calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 *
2843 * This function drops any buffer that it cannot get a lock on (with the
Jan Karaa7662232005-09-06 15:19:10 -07002844 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2845 * clean when doing a write request, and any buffer that appears to be
2846 * up-to-date when doing a read request. Further, it marks as clean buffers that
2847 * are processed for writing (the buffer cache won't assume that they are
2848 * actually clean until the buffer gets unlocked).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849 *
2850 * ll_rw_block sets b_end_io to a simple completion handler that marks
2851 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2852 * any waiters.
2853 *
2854 * All of the buffers must be for the same device, and must also be a
2855 * multiple of the current approved size for the device.
2856 */
2857void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2858{
2859 int i;
2860
2861 for (i = 0; i < nr; i++) {
2862 struct buffer_head *bh = bhs[i];
2863
Jan Karaa7662232005-09-06 15:19:10 -07002864 if (rw == SWRITE)
2865 lock_buffer(bh);
2866 else if (test_set_buffer_locked(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 continue;
2868
2869 get_bh(bh);
Jan Karaa7662232005-09-06 15:19:10 -07002870 if (rw == WRITE || rw == SWRITE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 if (test_clear_buffer_dirty(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07002872 bh->b_end_io = end_buffer_write_sync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 submit_bh(WRITE, bh);
2874 continue;
2875 }
2876 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877 if (!buffer_uptodate(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07002878 bh->b_end_io = end_buffer_read_sync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 submit_bh(rw, bh);
2880 continue;
2881 }
2882 }
2883 unlock_buffer(bh);
2884 put_bh(bh);
2885 }
2886}
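
/*
 * Illustrative sketch: the common pattern for ll_rw_block(), as used by
 * block_truncate_page() above - start the read, wait, then check the result.
 */
static int myfs_read_buffer(struct buffer_head *bh)
{
        if (buffer_uptodate(bh))
                return 0;
        ll_rw_block(READ, 1, &bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}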
2887
2888/*
2889 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2890 * and then start new I/O and wait upon that. The caller must have a ref on
2891 * the buffer_head.
2892 */
2893int sync_dirty_buffer(struct buffer_head *bh)
2894{
2895 int ret = 0;
2896
2897 WARN_ON(atomic_read(&bh->b_count) < 1);
2898 lock_buffer(bh);
2899 if (test_clear_buffer_dirty(bh)) {
2900 get_bh(bh);
2901 bh->b_end_io = end_buffer_write_sync;
2902 ret = submit_bh(WRITE, bh);
2903 wait_on_buffer(bh);
2904 if (buffer_eopnotsupp(bh)) {
2905 clear_buffer_eopnotsupp(bh);
2906 ret = -EOPNOTSUPP;
2907 }
2908 if (!ret && !buffer_uptodate(bh))
2909 ret = -EIO;
2910 } else {
2911 unlock_buffer(bh);
2912 }
2913 return ret;
2914}
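
/*
 * Illustrative sketch: a data-integrity update of one metadata block.
 * myfs_update_counters() is hypothetical; the rest are the real helpers.
 */
static int myfs_sync_super(struct super_block *sb, sector_t blocknr)
{
        struct buffer_head *bh = sb_bread(sb, blocknr);
        int err;

        if (!bh)
                return -EIO;
        myfs_update_counters(bh->b_data);       /* hypothetical edit */
        mark_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);
        brelse(bh);
        return err;
}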
2915
2916/*
2917 * try_to_free_buffers() checks if all the buffers on this particular page
2918 * are unused, and releases them if so.
2919 *
2920 * Exclusion against try_to_free_buffers may be obtained by either
2921 * locking the page or by holding its mapping's private_lock.
2922 *
2923 * If the page is dirty but all the buffers are clean then we need to
2924 * be sure to mark the page clean as well. This is because the page
2925 * may be against a block device, and a later reattachment of buffers
2926 * to a dirty page will set *all* buffers dirty. Which would corrupt
2927 * filesystem data on the same device.
2928 *
2929 * The same applies to regular filesystem pages: if all the buffers are
2930 * clean then we set the page clean and proceed. To do that, we require
2931 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2932 * private_lock.
2933 *
2934 * try_to_free_buffers() is non-blocking.
2935 */
2936static inline int buffer_busy(struct buffer_head *bh)
2937{
2938 return atomic_read(&bh->b_count) |
2939 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2940}
2941
2942static int
2943drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2944{
2945 struct buffer_head *head = page_buffers(page);
2946 struct buffer_head *bh;
2947
2948 bh = head;
2949 do {
akpm@osdl.orgde7d5a32005-05-01 08:58:39 -07002950 if (buffer_write_io_error(bh) && page->mapping)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 set_bit(AS_EIO, &page->mapping->flags);
2952 if (buffer_busy(bh))
2953 goto failed;
2954 bh = bh->b_this_page;
2955 } while (bh != head);
2956
2957 do {
2958 struct buffer_head *next = bh->b_this_page;
2959
2960 if (!list_empty(&bh->b_assoc_buffers))
2961 __remove_assoc_queue(bh);
2962 bh = next;
2963 } while (bh != head);
2964 *buffers_to_free = head;
2965 __clear_page_buffers(page);
2966 return 1;
2967failed:
2968 return 0;
2969}
2970
2971int try_to_free_buffers(struct page *page)
2972{
2973 struct address_space * const mapping = page->mapping;
2974 struct buffer_head *buffers_to_free = NULL;
2975 int ret = 0;
2976
2977 BUG_ON(!PageLocked(page));
2978 if (PageWriteback(page))
2979 return 0;
2980
2981 if (mapping == NULL) { /* can this still happen? */
2982 ret = drop_buffers(page, &buffers_to_free);
2983 goto out;
2984 }
2985
2986 spin_lock(&mapping->private_lock);
2987 ret = drop_buffers(page, &buffers_to_free);
2988 if (ret) {
2989 /*
2990 * If the filesystem writes its buffers by hand (eg ext3)
2991 * then we can have clean buffers against a dirty page. We
2992 * clean the page here; otherwise later reattachment of buffers
2993 * could encounter a non-uptodate page, which is unresolvable.
2994 * This only applies in the rare case where try_to_free_buffers
2995 * succeeds but the page is not freed.
2996 */
2997 clear_page_dirty(page);
2998 }
2999 spin_unlock(&mapping->private_lock);
3000out:
3001 if (buffers_to_free) {
3002 struct buffer_head *bh = buffers_to_free;
3003
3004 do {
3005 struct buffer_head *next = bh->b_this_page;
3006 free_buffer_head(bh);
3007 bh = next;
3008 } while (bh != buffers_to_free);
3009 }
3010 return ret;
3011}
3012EXPORT_SYMBOL(try_to_free_buffers);
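
/*
 * Illustrative sketch: a journalling filesystem's ->releasepage typically
 * ends up here after dropping its own claims on the page's buffers.  The
 * myfs_journal_drop_buffers() step is hypothetical.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp)
{
        if (!myfs_journal_drop_buffers(page))   /* hypothetical */
                return 0;
        return try_to_free_buffers(page);
}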
3013
3014int block_sync_page(struct page *page)
3015{
3016 struct address_space *mapping;
3017
3018 smp_mb();
3019 mapping = page_mapping(page);
3020 if (mapping)
3021 blk_run_backing_dev(mapping->backing_dev_info, page);
3022 return 0;
3023}
3024
3025/*
3026 * There are no bdflush tunables left. But distributions are
3027 * still running obsolete flush daemons, so we terminate them here.
3028 *
3029 * Use of bdflush() is deprecated and will be removed in a future kernel.
3030 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3031 */
3032asmlinkage long sys_bdflush(int func, long data)
3033{
3034 static int msg_count;
3035
3036 if (!capable(CAP_SYS_ADMIN))
3037 return -EPERM;
3038
3039 if (msg_count < 5) {
3040 msg_count++;
3041 printk(KERN_INFO
3042 "warning: process `%s' used the obsolete bdflush"
3043 " system call\n", current->comm);
3044 printk(KERN_INFO "Fix your initscripts?\n");
3045 }
3046
3047 if (func == 1)
3048 do_exit(0);
3049 return 0;
3050}
3051
3052/*
3053 * Buffer-head allocation
3054 */
3055static kmem_cache_t *bh_cachep;
3056
3057/*
3058 * Once the number of bh's in the machine exceeds this level, we start
3059 * stripping them in writeback.
3060 */
3061static int max_buffer_heads;
3062
3063int buffer_heads_over_limit;
3064
3065struct bh_accounting {
3066 int nr; /* Number of live bh's */
3067 int ratelimit; /* Limit cacheline bouncing */
3068};
3069
3070static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3071
3072static void recalc_bh_state(void)
3073{
3074 int i;
3075 int tot = 0;
3076
3077 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3078 return;
3079 __get_cpu_var(bh_accounting).ratelimit = 0;
3080 for_each_cpu(i)
3081 tot += per_cpu(bh_accounting, i).nr;
3082 buffer_heads_over_limit = (tot > max_buffer_heads);
3083}
3084
Al Virodd0fc662005-10-07 07:46:04 +01003085struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086{
3087 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3088 if (ret) {
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003089 get_cpu_var(bh_accounting).nr++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 recalc_bh_state();
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003091 put_cpu_var(bh_accounting);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092 }
3093 return ret;
3094}
3095EXPORT_SYMBOL(alloc_buffer_head);
3096
3097void free_buffer_head(struct buffer_head *bh)
3098{
3099 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3100 kmem_cache_free(bh_cachep, bh);
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003101 get_cpu_var(bh_accounting).nr--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102 recalc_bh_state();
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003103 put_cpu_var(bh_accounting);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104}
3105EXPORT_SYMBOL(free_buffer_head);
3106
3107static void
3108init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3109{
3110 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3111 SLAB_CTOR_CONSTRUCTOR) {
3112 struct buffer_head * bh = (struct buffer_head *)data;
3113
3114 memset(bh, 0, sizeof(*bh));
3115 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3116 }
3117}
3118
3119#ifdef CONFIG_HOTPLUG_CPU
3120static void buffer_exit_cpu(int cpu)
3121{
3122 int i;
3123 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3124
3125 for (i = 0; i < BH_LRU_SIZE; i++) {
3126 brelse(b->bhs[i]);
3127 b->bhs[i] = NULL;
3128 }
3129}
3130
3131static int buffer_cpu_notify(struct notifier_block *self,
3132 unsigned long action, void *hcpu)
3133{
3134 if (action == CPU_DEAD)
3135 buffer_exit_cpu((unsigned long)hcpu);
3136 return NOTIFY_OK;
3137}
3138#endif /* CONFIG_HOTPLUG_CPU */
3139
3140void __init buffer_init(void)
3141{
3142 int nrpages;
3143
3144 bh_cachep = kmem_cache_create("buffer_head",
3145 sizeof(struct buffer_head), 0,
Andrea Arcangelie422fd2c2005-05-05 16:15:04 -07003146 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147
3148 /*
3149 * Limit the bh occupancy to 10% of ZONE_NORMAL
3150 */
3151 nrpages = (nr_free_buffer_pages() * 10) / 100;
3152 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3153 hotcpu_notifier(buffer_cpu_notify, 0);
3154}
3155
3156EXPORT_SYMBOL(__bforget);
3157EXPORT_SYMBOL(__brelse);
3158EXPORT_SYMBOL(__wait_on_buffer);
3159EXPORT_SYMBOL(block_commit_write);
3160EXPORT_SYMBOL(block_prepare_write);
3161EXPORT_SYMBOL(block_read_full_page);
3162EXPORT_SYMBOL(block_sync_page);
3163EXPORT_SYMBOL(block_truncate_page);
3164EXPORT_SYMBOL(block_write_full_page);
3165EXPORT_SYMBOL(cont_prepare_write);
3166EXPORT_SYMBOL(end_buffer_async_write);
3167EXPORT_SYMBOL(end_buffer_read_sync);
3168EXPORT_SYMBOL(end_buffer_write_sync);
3169EXPORT_SYMBOL(file_fsync);
3170EXPORT_SYMBOL(fsync_bdev);
3171EXPORT_SYMBOL(generic_block_bmap);
3172EXPORT_SYMBOL(generic_commit_write);
3173EXPORT_SYMBOL(generic_cont_expand);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08003174EXPORT_SYMBOL(generic_cont_expand_simple);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175EXPORT_SYMBOL(init_buffer);
3176EXPORT_SYMBOL(invalidate_bdev);
3177EXPORT_SYMBOL(ll_rw_block);
3178EXPORT_SYMBOL(mark_buffer_dirty);
3179EXPORT_SYMBOL(submit_bh);
3180EXPORT_SYMBOL(sync_dirty_buffer);
3181EXPORT_SYMBOL(unlock_buffer);